blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a74a29995ff38d8248e33eb1ea0c4866318702c0 | 9e4875e14163ac36df01dd55d16569ea3cfc6c15 | /courier/urls.py | f9de696eea5b0db58dc8f1d16c8cca03faedb904 | [] | no_license | iamlaboniraz/e_courier | cdb51528352a2e364aa5c41b5b6981d676e06ba8 | 1f9ef5d6b882e038104b9fba94b72982fca9e435 | refs/heads/master | 2020-05-19T08:36:07.327860 | 2019-05-05T06:17:48 | 2019-05-05T06:17:48 | 184,924,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | """courier URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from delivery import views
# for api_views
import delivery.api_views
# Route table: maps URL paths onto delivery-app views and the DRF API views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.first_page, name='first_page'),
    path('home/', views.home, name='home'),
    path('confirm/', views.confirm, name='confirm'),
    path('delivery_request/', views.delivery_request, name='delivery_request'),
    path('send_driver/', views.send_driver, name='send_driver'),
    path('signup/', views.signup, name='signup'),
    path('admin_account/', views.admin_account, name='admin_account'),
    path('personal_account/', views.personal_account, name='personal_account'),
    path('accounts/', include('django.contrib.auth.urls')),
    # BUG FIX: a second, identical 'send_driver/' route was registered here.
    # Django resolves the first matching pattern, so the duplicate was dead
    # code and has been removed.
    # API endpoints (delivery.api_views).  Literal suffix routes ('new',
    # '<int:id>/destroy') are listed before the bare '<int:id>' capture so
    # they are matched first.
    path('api/drivers/', delivery.api_views.DriverList.as_view()),
    path('api/drivers/new', delivery.api_views.DriverCreate.as_view()),
    path('api/drivers/<int:id>/destroy', delivery.api_views.DriverDestroy.as_view()),
    path('api/drivers/<int:id>', delivery.api_views.DriverRetrieveUpdateDestroy.as_view()),
]
# Serve static and media files through Django itself.  This is a development
# convenience; production setups normally gate these behind settings.DEBUG
# (as the commented-out variant below suggests) and let the web server serve
# them instead.
# if settings.DEBUG:
#     urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"sharif.bkash2019@gmail.com"
] | sharif.bkash2019@gmail.com |
355ea3438068cb566e9bb686ad11c0e9bbcbe658 | 20f86ba7febb3233516f0e2161bc98604c302cc4 | /tests/acceptance/page_model/base_page.py | 165ad2e4da698093c6351465a36d33bb5fb659c4 | [] | no_license | LukaszMalucha/Machine-Learning-Dashboard | 5369270ff39710f2c6545ea0828f01265d7e797f | 3ee29498d7d74365a2cd33547795ddfe9573dac4 | refs/heads/master | 2022-12-10T05:08:52.693425 | 2019-03-14T11:15:54 | 2019-03-14T11:15:54 | 126,514,014 | 8 | 3 | null | 2022-12-08T01:33:30 | 2018-03-23T16:52:05 | Python | UTF-8 | Python | false | false | 1,023 | py | from tests.acceptance.locators.base_page import BasePageLocators
class BasePage:
    """Shared page-object base for the acceptance tests.

    Holds the selenium WebDriver and exposes the commonly used page
    elements as read-only properties, each resolved through a locator
    tuple from ``BasePageLocators`` (unpacked into the driver lookup).
    """

    def __init__(self, driver):
        # WebDriver instance used for every element lookup on the page.
        self.driver = driver

    def _one(self, locator):
        # Resolve a single element from a locator tuple.
        return self.driver.find_element(*locator)

    def _many(self, locator):
        # Resolve every element matching a locator tuple.
        return self.driver.find_elements(*locator)

    @property
    def url(self):
        # Base address of the application under test.
        return 'http://127.0.0.1:5000'

    @property
    def title(self):
        return self._one(BasePageLocators.TITLE)

    @property
    def navigation(self):
        return self._many(BasePageLocators.NAV_LINKS)

    @property
    def dropdown(self):
        return self._one(BasePageLocators.DROPDOWN)

    @property
    def dropdown_links(self):
        return self._many(BasePageLocators.DROPDOWN_LINKS)

    @property
    def table(self):
        return self._one(BasePageLocators.TABLE)

    @property
    def github_user(self):
        return self._one(BasePageLocators.GITHUB_USER)

    @property
    def github_repos(self):
        return self._one(BasePageLocators.GITHUB_REPOS)
| [
"lucasmalucha@gmail.com"
] | lucasmalucha@gmail.com |
9074864fdeb2c55bd6d26974e16e43289c93fea1 | dcba7c6df1a6560c66e0fb86d0659d160f2b3308 | /prog2-master-konyv/Malmo/SteveOkos.py | 043d2d016aa401e49ca00a6b887dc281c75bdbc0 | [] | no_license | RubiMaistro/prog2 | 9be1a7dde0190b79b5cc4524c3ce881ea523ab59 | 2b1743ea251b81b7de089d3fdbf7b9b59dc08377 | refs/heads/master | 2023-01-30T19:53:29.259291 | 2020-11-30T19:12:25 | 2020-11-30T19:12:25 | 297,921,633 | 1 | 0 | null | null | null | null | ISO-8859-2 | Python | false | false | 15,206 | py | from __future__ import print_function
# ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
# Tutorial sample #2: Run simple mission using raw XML
# Added modifications by Norbert Bátfai (nb4tf4i) batfai.norbert@inf.unideb.hu, mine.ly/nb4tf4i.1
# 2018.10.18, https://bhaxor.blog.hu/2018/10/18/malmo_minecraft
# 2020.02.02, NB4tf4i's Red Flowers, http://smartcity.inf.unideb.hu/~norbi/NB4tf4iRedFlowerHell
# 2020.03.02, https://github.com/nbatfai/RedFlowerHell
# 2020.03.07, "_smartSteve": nof_turn (number of turns) is replaced by the dict self.collectedFlowers
# 2020.03.11, "_bu": bottom up, s4v3: https://youtu.be/VP0kfvRYD1Y
from builtins import range
import MalmoPython
import os
import sys
import time
import random
import json
import math
if sys.version_info[0] == 2:
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
else:
import functools
print = functools.partial(print, flush=True)
# Create default Malmo objects and parse the command line; on a parse error,
# print Malmo's own usage text and exit with a failure status.
agent_host = MalmoPython.AgentHost()
try:
    agent_host.parse( sys.argv )
except RuntimeError as e:
    print('ERROR:',e)
    print(agent_host.getUsage())
    exit(1)
if agent_host.receivedArgument("help"):
    print(agent_host.getUsage())
    exit(0)
# -- set up the mission -- #
# Read the mission definition XML from disk, printing the challenge banner
# while the file is open.
missionXML_file='nb4tf4i_d.xml'
with open(missionXML_file, 'r') as f:
    print("NB4tf4i's Red Flowers (Red Flower Hell) - DEAC-Hackers Battle Royale Arena\n")
    print("NB4tf4i vörös pipacsai (Vörös Pipacs Pokol) - DEAC-Hackers Battle Royale Arena\n")
    print("The aim of this first challenge, called nb4tf4i's red flowers, is to collect as many red flowers as possible before the lava flows down the hillside.\n")
    print("Ennek az első, az nb4tf4i vörös virágai nevű kihívásnak a célja összegyűjteni annyi piros virágot, amennyit csak lehet, mielőtt a láva lefolyik a hegyoldalon.\n")
    print("Norbert Bátfai, batfai.norbert@inf.unideb.hu, https://arato.inf.unideb.hu/batfai.norbert/\n")
    print("Version history\n", "Code: ", sys.argv[0], ", series 4 v.3, bottom up, max 18 poppies. Norbert Bátfai, nbatfai@gmail.com\n")
    print("Loading mission from %s" % missionXML_file)
    mission_xml = f.read()
# Validate the XML into a mission spec, then place a lava block at the origin
# (this starts the lava flow that acts as the challenge's timer).
my_mission = MalmoPython.MissionSpec(mission_xml, True)
my_mission.drawBlock( 0, 0, 0, "lava")
class Hourglass:
    """Console spinner that cycles through a fixed character set.

    Each call to :meth:`cursor` advances one position (wrapping around)
    and returns the character now under the cursor.
    """

    def __init__(self, charSet):
        self.charSet = charSet
        self.index = 0

    def cursor(self):
        """Advance the spinner and return the next character."""
        step = self.index + 1
        self.index = step % len(self.charSet)
        return self.charSet[self.index]
hg = Hourglass('|/-\|')
class Steve:
    """Malmo agent controller that climbs the hill and harvests red flowers.

    Tracks the agent's pose (x/y/z, yaw, pitch), a per-height record of
    collected flowers, and index tables mapping the current compass heading
    onto the "nbr3x3" observation grid.  "nbr3x3" appears to be a flattened
    3x3x3 block neighbourhood (indices 0-8 = bottom layer, 9-17 = the layer
    at the agent's feet, 18-26 = top) -- TODO confirm against the mission XML.
    """
    def __init__(self, agent_host):
        # Handle used to send commands to / poll state from the Malmo mission.
        self.agent_host = agent_host
        # Last known agent position (integer block coordinates).
        self.x = 0
        self.y = 0
        self.z = 0
        # Last known view direction, degrees.
        self.yaw = 0
        self.pitch = 0
        # Indices into one nbr3x3 layer, recomputed from yaw by calcNbrIndex().
        self.front_of_me_idx = 0
        self.front_of_me_idxr = 0
        self.front_of_me_idxl = 0
        self.right_of_me_idx = 0
        self.left_of_me_idx = 0
        # Largest flower stack size seen in the hotbar so far.
        self.nof_red_flower = 0
        # Block type currently under the crosshair ("NOTHING" when none).
        self.lookingat = ""
        # The y level at which the last attack (mining) command was issued.
        self.attackLvl = 0
        # collectedFlowers[level] is True once the flower at that height was
        # taken; levels 1 and 2 are pre-marked so the agent climbs immediately.
        self.collectedFlowers = {}
        for i in range(100):
            self.collectedFlowers[i] = False
        self.collectedFlowers[1] = True
        self.collectedFlowers[2] = True
    def checkInventory(self, observations):
        """Detect a newly picked-up flower via hotbar slots 0-1.

        On pickup: record the flower's level, look back up, and attempt a
        level-up jump.  Returns True when a level-up jump was issued.
        """
        for i in range(2):
            hotbari = 'Hotbar_'+str(i)+'_item'
            hotbars = 'Hotbar_'+str(i)+'_size'
            slot0_contents = observations.get(hotbari, "")
            if slot0_contents == "red_flower":
                slot0_size = observations.get(hotbars, "")
                # A growing stack size means another flower entered the inventory.
                if self.nof_red_flower < slot0_size :
                    self.nof_red_flower = slot0_size
                    print(" A RED FLOWER IS MINED AND PICKED UP")
                    print(" Steve's lvl: ", self.y, "Flower lvl: ", self.attackLvl)
                    # The flower was mined at attackLvl, not necessarily at self.y.
                    self.collectedFlowers[self.attackLvl] = True
                    self.agent_host.sendCommand( "look -1" )
                    time.sleep(.4)
                    if self.lvlUp(observations.get("nbr3x3", 0)):
                        return True
    def pickUp(self):
        """Swing once at the block under the crosshair and remember the level."""
        self.agent_host.sendCommand( "attack 1" )
        time.sleep(.23)
        self.attackLvl = self.y
    def lvlUp(self, nbr):
        """If the flower at the current height is already collected, jump
        sideways to climb one level.  Returns True when a jump was issued."""
        if self.collectedFlowers[self.y]:
            self.lvlUpJump(nbr)
            return True
        else:
            return False
    def idle(self, delay):
        """Sleep for ``delay`` seconds between observation polls."""
        #print(" SLEEPING for ", delay)
        time.sleep(delay)
    def isInTrap(self, nbr):
        """Heuristic: the agent is boxed in when most of the same-layer
        neighbour cells around it are dirt (more than 5 of the checked cells)."""
        dc = 0
        # Ring of the agent's own layer (index 13 would be the agent itself).
        nbri = [9,10,11,12,14,15,16,17]
        # NOTE(review): the loop starts at 1, so nbri[0] (grid index 9) is
        # never tested -- possibly an unintended off-by-one.
        for i in range(1, len(nbri)):
            if nbr[nbri[i]]=="dirt" :
                dc = dc + 1
        return dc > 5
    def turnFromWall(self, nbr):
        """Turn toward the open side: right when the right cell is air and the
        left cell is dirt (checked on two layers), otherwise left."""
        if (nbr[self.right_of_me_idx+9]=="air" and nbr[self.left_of_me_idx+9]=="dirt") or (nbr[self.right_of_me_idx]=="air" and nbr[self.left_of_me_idx]=="dirt"):
            self.agent_host.sendCommand( "turn 1" )
        else:
            self.agent_host.sendCommand( "turn -1" )
        time.sleep(.15)
    def lvlUpJump(self, nbr):
        """Jump-strafe toward the open side to gain one level of height
        (same open-side test as turnFromWall, opposite strafe direction)."""
        if (nbr[self.right_of_me_idx+9]=="air" and nbr[self.left_of_me_idx+9]=="dirt") or (nbr[self.right_of_me_idx]=="air" and nbr[self.left_of_me_idx]=="dirt"):
            self.agent_host.sendCommand( "jumpstrafe -1" )
        else:
            self.agent_host.sendCommand( "jumpstrafe 1" )
        time.sleep(.1)
    def calcNbrIndex(self):
        """Map the current yaw (eight 45-degree compass sectors) onto indices
        of one nbr3x3 layer, so front/right/left cells can be read directly."""
        # yaw ~ 180 degrees
        if self.yaw >= 180-22.5 and self.yaw <= 180+22.5 :
            self.front_of_me_idx = 1
            self.front_of_me_idxr = 2
            self.front_of_me_idxl = 0
            self.right_of_me_idx = 5
            self.left_of_me_idx = 3
        # yaw between 180 and 270 degrees (diagonal sector)
        elif self.yaw >= 180+22.5 and self.yaw <= 270-22.5 :
            self.front_of_me_idx = 2
            self.front_of_me_idxr = 5
            self.front_of_me_idxl =1
            self.right_of_me_idx = 8
            self.left_of_me_idx = 0
        # yaw ~ 270 degrees
        elif self.yaw >= 270-22.5 and self.yaw <= 270+22.5 :
            self.front_of_me_idx = 5
            self.front_of_me_idxr = 8
            self.front_of_me_idxl = 2
            self.right_of_me_idx = 7
            self.left_of_me_idx = 1
        # yaw between 270 and 360 degrees (diagonal sector)
        elif self.yaw >= 270+22.5 and self.yaw <= 360-22.5 :
            self.front_of_me_idx = 8
            self.front_of_me_idxr = 7
            self.front_of_me_idxl = 5
            self.right_of_me_idx = 6
            self.left_of_me_idx = 2
        # yaw ~ 0/360 degrees (sector wraps around)
        elif self.yaw >= 360-22.5 or self.yaw <= 0+22.5 :
            self.front_of_me_idx = 7
            self.front_of_me_idxr = 6
            self.front_of_me_idxl = 8
            self.right_of_me_idx = 3
            self.left_of_me_idx = 5
        # yaw between 0 and 90 degrees (diagonal sector)
        elif self.yaw >= 0+22.5 and self.yaw <= 90-22.5 :
            self.front_of_me_idx = 6
            self.front_of_me_idxr = 3
            self.front_of_me_idxl = 7
            self.right_of_me_idx = 0
            self.left_of_me_idx = 8
        # yaw ~ 90 degrees
        elif self.yaw >= 90-22.5 and self.yaw <= 90+22.5 :
            self.front_of_me_idx = 3
            self.front_of_me_idxr = 0
            self.front_of_me_idxl = 6
            self.right_of_me_idx = 1
            self.left_of_me_idx = 7
        # yaw between 90 and 180 degrees (diagonal sector)
        elif self.yaw >= 90+22.5 and self.yaw <= 180-22.5 :
            self.front_of_me_idx = 0
            self.front_of_me_idxr = 1
            self.front_of_me_idxl = 3
            self.right_of_me_idx = 2
            self.left_of_me_idx = 6
        else:
            # Unreachable for yaw in [0, 360]; kept as a diagnostic.
            print("There is great disturbance in the Force...")
    def whatISee(self, observations):
        """Cache the type of the block in the crosshair ("NOTHING" if absent)."""
        self.lookingat = "NOTHING"
        if "LineOfSight" in observations:
            lineOfSight = observations["LineOfSight"]
            self.lookingat = lineOfSight["type"]
    def whatMyPos(self, observations):
        """Refresh pose fields from the observation dict (floats truncated to int)."""
        if "Yaw" in observations:
            self.yaw = int(observations["Yaw"])
        if "Pitch" in observations:
            self.pitch = int(observations["Pitch"])
        if "XPos" in observations:
            self.x = int(observations["XPos"])
        if "ZPos" in observations:
            self.z = int(observations["ZPos"])
        if "YPos" in observations:
            self.y = int(observations["YPos"])
    def run(self):
        """Main loop: act on the freshest world state until the mission ends."""
        world_state = self.agent_host.getWorldState()
        # Loop until mission ends:
        while world_state.is_mission_running:
            #print(">>> nb4tf4i arena -----------------------------------\n")
            act = self.action(world_state)
            #print("nb4tf4i arena >>> -----------------------------------\n")
            if not act:
                # No observation this tick -- short sleep before re-polling.
                self.idle(.017)
            world_state = self.agent_host.getWorldState()
    def action(self, world_state):
        """Perform one decision step.

        Priority order: escape a dirt trap, grab a flower in the crosshair,
        approach any visible flower, level up if the current level is done,
        otherwise walk/turn along the hillside.  Returns True when a command
        was issued, False when there was nothing to observe.
        """
        for error in world_state.errors:
            print("Error:", error.text)
        if world_state.number_of_observations_since_last_state == 0:
            #print(" NO OBSERVATIONS NO ACTIONS")
            return False
        # NOTE(review): `input` shadows the builtin; left unchanged here.
        input = world_state.observations[-1].text
        observations = json.loads(input)
        nbr = observations.get("nbr3x3", 0)
        #print(observations)
        self.whatMyPos(observations)
        print("\r Steve's Coords: ", self.x, self.y, self.z, end='')
        #print(" Steve's Yaw: ", self.yaw)
        #print(" Steve's Pitch: ", self.pitch)
        self.checkInventory(observations)
        #print("Number of flowers: ", self.nof_red_flower)
        self.whatISee(observations)
        #print(" Steve's <): ", self.lookingat)
        self.calcNbrIndex()
        # 1) Escape: jump forward, turn away from the wall, jump again.
        if self.isInTrap(nbr) :
            self.agent_host.sendCommand( "jumpmove 1" )
            time.sleep(.1)
            self.turnFromWall(nbr)
            self.agent_host.sendCommand( "jumpmove 1" )
            time.sleep(.1)
            return True
        # 2) A flower is right under the crosshair -- mine it.
        if self.lookingat == "red_flower":
            print(" A RED FLOWER IS FOUND (lookingat)")
            self.pickUp()
            return True
        # 3) Scan each of the 9 grid columns (all three layers) for a flower
        # and move/strafe/turn toward the first one found.
        for i in range(9):
            if nbr[i]=="red_flower" or nbr[i+9]=="red_flower" or nbr[i+18]=="red_flower":
                print(" I CAN SEE A RED FLOWER: ", i, " LEVEL ", self.y)
                if i == self.front_of_me_idx :
                    print("F A RED FLOWER IS RIGTH IN FRONT OF ME")
                    self.agent_host.sendCommand( "move 1" )
                    time.sleep(.2)
                    self.agent_host.sendCommand( "look 1" )
                    time.sleep(.2)
                    print("Steve <) ", self.lookingat)
                    return True
                elif i == self.front_of_me_idxr :
                    print("R A RED FLOWER IS RIGTH IN RIGHT OF ME")
                    self.agent_host.sendCommand( "strafe 1" )
                    time.sleep(.2)
                    return True
                elif i == self.front_of_me_idxl :
                    print("L A RED FLOWER IS RIGTH IN LEFT OF ME")
                    self.agent_host.sendCommand( "strafe -1" )
                    time.sleep(.2)
                    return True
                elif i == 4 :
                    # Column 4 is the agent's own column: look down, then mine.
                    # NOTE(review): red_flower_is_mining is set but never read
                    # or initialised in __init__.
                    self.red_flower_is_mining = True
                    print(" I AM STANDING ON A RED FLOWER!!!")
                    if self.pitch != 90:
                        self.agent_host.sendCommand( "look 1" )
                        print("PITCH I AM STANDING ON A RED FLOWER!!!")
                        time.sleep(.3)
                    else:
                        print("ATTACK I AM STANDING ON A RED FLOWER!!! LEVEL ", self.y)
                        self.pickUp()
                        self.agent_host.sendCommand( "look -1" )
                        time.sleep(.3)
                    return True
                else :
                    print(" I AM TURNING TO A RED FLOWER")
                    self.agent_host.sendCommand( "turn 1" )
                    time.sleep(.2)
                    return True
        # 4) Nothing visible: climb if this level is finished...
        if self.lvlUp(nbr):
            print(" LVL UP")
        # ...then walk along the slope, turning away from obstacles.
        if nbr[self.front_of_me_idx+9]!="air" and nbr[self.front_of_me_idx+9]!="red_flower":
            print(" THERE ARE OBSTACLES IN FRONT OF ME ", nbr[self.front_of_me_idx], end='')
            self.turnFromWall(nbr)
        else:
            print(" THERE IS NO OBSTACLE IN FRONT OF ME", end='')
            if nbr[self.front_of_me_idx]=="dirt":
                self.agent_host.sendCommand( "move 1" )
                time.sleep(.013)
            else:
                self.turnFromWall(nbr)
        return True
# Run the mission once (num_repeats kept as a knob for repeated runs).
num_repeats = 1
for ii in range(num_repeats):
    my_mission_record = MalmoPython.MissionRecordSpec()
    # Attempt to start a mission, retrying because the Malmo client may not
    # be ready yet; give up after max_retries attempts.
    max_retries = 6
    for retry in range(max_retries):
        try:
            agent_host.startMission( my_mission, my_mission_record )
            break
        except RuntimeError as e:
            if retry == max_retries - 1:
                print("Error starting mission:", e)
                exit(1)
            else:
                print("Attempting to start the mission:")
                time.sleep(2)
    # Loop until mission starts, showing the spinner while waiting:
    print(" Waiting for the mission to start")
    world_state = agent_host.getWorldState()
    while not world_state.has_mission_begun:
        print("\r"+hg.cursor(), end="")
        time.sleep(0.15)
        world_state = agent_host.getWorldState()
        for error in world_state.errors:
            print("Error:",error.text)
    print("NB4tf4i Red Flower Hell running\n")
    # Hand control to the agent until the mission ends, then report the score.
    steve = Steve(agent_host)
    steve.run()
    print("Number of flowers: "+ str(steve.nof_red_flower))
    time.sleep(3)
print("Mission ended")
| [
"talingermark@gmail.com"
] | talingermark@gmail.com |
5ef5a1070dca06e738ce73db72454c583ffa8acc | ddda9924134a94d54a1ed65f813846126fc7b3da | /.local/share/Trash/files/0003_remove_earnings_jan_code.py | ec4e78a3fe190c6e8b2e24a3c60abe9e5d82a313 | [] | no_license | MONTAINI/Jihanki | 27b6443b180096bf059733274a33ee0d99945a3c | 001258b951d6147509c5e3fef0b67b9f28f0748b | refs/heads/master | 2022-10-11T20:11:18.276496 | 2018-05-08T10:21:59 | 2018-05-08T10:21:59 | 132,592,296 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-24 02:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the legacy 'jan_code' field from the Earnings model."""

    # Must apply after migration 0002 so the recorded model state matches.
    dependencies = [
        ('jihanki', '0002_auto_20180124_1129'),
    ]
    operations = [
        # Schema-destructive: drops the jan_code column and any data in it.
        migrations.RemoveField(
            model_name='earnings',
            name='jan_code',
        ),
    ]
| [
"kd1223626@st.kobedenshi.ac.jp"
] | kd1223626@st.kobedenshi.ac.jp |
49d527238fc069e4b21c3acafe4fa68c4da3bffe | 8a5051b14a5030b3c3c39a548784cbcc0023db80 | /Practice/Pra0720/htmlRunner.py | 70f4ec54dd5c91028cc612e67e449b39159a27b9 | [] | no_license | Anapplewj/Learn_ceshi | 468b20e51727edf6f61a6480e968414b299c40e2 | 34cc5fe3514529eb56fccc1fb47103151d093ecb | refs/heads/master | 2023-06-26T13:38:39.687151 | 2021-08-02T14:17:11 | 2021-08-02T14:17:11 | 387,635,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | # 测试报告
import HTMLTestRunner
import os
import sys
import time
import unittest
def createsuite(start_dir="../Pra0720", pattern="testbaidu*.py", top_level_dir=None):
    """Discover and return a unittest suite.

    Generalized but backward compatible: called with no arguments it behaves
    exactly as before (discovers ``testbaidu*.py`` modules under
    ``../Pra0720``).

    :param start_dir: directory to scan for test modules
    :param pattern: filename glob that test modules must match
    :param top_level_dir: top-level package directory (None means start_dir)
    :return: a unittest.TestSuite containing the discovered cases
    """
    discovers = unittest.defaultTestLoader.discover(start_dir, pattern=pattern, top_level_dir=top_level_dir)
    # Printing the suite is kept for parity with the original console output.
    print(discovers)
    return discovers
if __name__=="__main__":
    # 1. Create the report output folder.
    curpath = sys.path[0]
    print(sys.path)
    print(sys.path[0])
    # Create a 'resultreport' folder under the current path if it is missing.
    if not os.path.exists(curpath+'/resultreport'):
        os.makedirs(curpath+'/resultreport')
    # 2. Avoid duplicate report filenames by embedding a timestamp.
    now = time.strftime("%Y-%m-%d-%H %M %S", time.localtime(time.time()))
    print(time.time())
    print(time.localtime(time.time()))
    # The report filename is the output directory plus the timestamped name.
    filename = curpath + '/resultreport/'+ now + 'resultreport.html'
    # Open the HTML report file; 'wb' opens it for binary writing.
    with open(filename, 'wb') as fp:
        # The keyword arguments become the header fields of the HTML report
        # (title and description are intentionally left in Chinese for users).
        runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u"测试报告",
                                              description=u"用例执行情况", verbosity=2)
        suite = createsuite()
        runner.run(suite)
| [
"2648175705@qq.com"
] | 2648175705@qq.com |
103422c3c7919d61071b76587342ae3776906b3b | 75c6e20dd5d8a43264c9d050e61f0069af995be6 | /src/metrics/meteor.py | 872da5dbfb19b4e2e47e0205949136f27d01910b | [] | no_license | armor-ai/RRGen | 0f480b558fe250658b6036b7ce48456a8b9de912 | 182ae87adf2a03818e3da000770e0f878f1f3534 | refs/heads/master | 2021-11-27T12:50:23.584628 | 2021-11-16T12:23:23 | 2021-11-16T12:23:23 | 204,915,823 | 10 | 6 | null | 2021-11-16T12:23:24 | 2019-08-28T11:28:28 | Python | UTF-8 | Python | false | false | 4,107 | py | #!/usr/bin/env python
import subprocess, threading
from scorer import Scorer
from reference import Reference
class MeteorError(Exception):
    """Raised when the external METEOR process reports a failure.

    Carries the raw error payload in ``value``; the string form is its repr.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        text = repr(self.value)
        return text
class MeteorScorer(Scorer):
    """
    Python wrapper for the METEOR metric. Starts a METEOR process and keeps it alive, so that the model
    can be kept in memory. Arguments are the meteor language abbreviation and the path to the METEOR
    installation. They need to be specified as follows: "meteor_language=lg,meteor_path=path" (any order).
    """
    def __init__(self, argument_string):
        Scorer.__init__(self, argument_string)
        # Lock for the METEOR process, which can only handle one request at a time:
        self.lock = threading.Lock()
        # Get necessary arguments for starting METEOR from argument string parsed in Scorer.__init__()
        self._meteor_language = self._arguments["meteor_language"]
        self._meteor_path = self._arguments["meteor_path"] + "/"
        # Start a METEOR process in stdio mode; it stays alive so the model is
        # loaded only once.  NOTE(review): shell=True with a glob relies on the
        # shell expanding "meteor-*.jar" to exactly one jar -- confirm.
        command = "java -Xmx2G -jar " + self._meteor_path + "meteor-*.jar - - -l " + self._meteor_language + " -stdio"
        self.meteor_process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE, shell=True)
    def set_reference(self, reference_tokens):
        """
        Construct a MeteorReference from a sequence of tokens and make it the reference against which the scorer evaluates hypotheses.
        This can be done any time.
        """
        # Hold the process lock so a reference swap cannot interleave with a
        # scoring request that is in flight.
        self.lock.acquire()
        self._reference = MeteorReference(reference_tokens, self)
        self.lock.release()
    def terminate_process(self):
        """
        Waits for the current request to be processed and terminates the METEOR process.
        """
        # Acquiring the lock waits for any in-progress scoring to finish first.
        self.lock.acquire()
        self.meteor_process.terminate()
        self.lock.release()
    def kill_process(self):
        """
        Kills the METEOR process right away.
        """
        # No locking: this is the forceful shutdown path.
        self.meteor_process.kill()
class MeteorReference(Reference):
    """
    METEOR reference object, against which hypotheses can be scored.

    Talks to the scorer's long-lived METEOR process over its stdio protocol:
    a SCORE request yields feature values, which are echoed back in an EVAL
    request to obtain the final score.
    """
    def __init__(self, reference_tokens, meteor_scorer):
        Reference.__init__(self, reference_tokens)
        # Construct reference string from tokens
        self._reference_string = " ".join(reference_tokens)
        self._meteor_scorer = meteor_scorer
    def score(self, hypothesis_tokens):
        """Score a tokenized hypothesis against this reference; returns a float."""
        # Construct hypothesis string from hypothesis tokens:
        hypothesis_string = " ".join(hypothesis_tokens)
        # Acquire lock to make sure METEOR process is not in use:
        self._meteor_scorer.lock.acquire()
        # Step 1: send the SCORE request (reference ||| hypothesis).
        # NOTE(review): on any of the error paths below the lock is never
        # released, so a single METEOR failure deadlocks later requests.
        try:
            self._meteor_scorer.meteor_process.stdin.write(
                "SCORE ||| " + self._reference_string + " ||| " + hypothesis_string + "\n")
        except:
            raise MeteorError(
                "Meteor returned the following error: " + self._meteor_scorer.meteor_process.stderr.readline().strip())
        # Read feature values from process output
        std_out = self._meteor_scorer.meteor_process.stdout.readline()
        # Step 2: pass feature values back for computation of the final score.
        try:
            self._meteor_scorer.meteor_process.stdin.write("EVAL ||| " + std_out)
        except:
            raise MeteorError(
                "Meteor returned the following error: " + self._meteor_scorer.meteor_process.stderr.readline().strip())
        std_out = self._meteor_scorer.meteor_process.stdout.readline()
        # Release the process lock
        self._meteor_scorer.lock.release()
        # Check if Meteor returned a parseable numeric score:
        try:
            n = float(std_out)
        except:
            raise MeteorError(
                "Meteor returned the following error: " + self._meteor_scorer.meteor_process.stderr.readline().strip())
        # Return final score
        return n
"gcyydxf@gmail.com"
] | gcyydxf@gmail.com |
fc4dd2aeebba0b006b2c867b0c71b235f777c216 | 4737df4162bee6abc7b78d1e8b4930d2cb542d6b | /graphgallery/nn/layers/pytorch/conv/dagnn.py | 24f060cea3fb5cf39695a42498b8ea286a211594 | [
"MIT"
] | permissive | freebird3366/GraphGallery | d1aa4ff291753ccf0ac4a8e024d18c59d2db8aa8 | f3294dad35ca0e14a525ed48f18feae2e9af661f | refs/heads/master | 2023-02-23T20:04:30.316450 | 2021-02-01T16:06:03 | 2021-02-01T16:06:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | import torch
import torch.nn as nn
from graphgallery.nn.init.pytorch import uniform, zeros
from ..get_activation import get_activation
class PropConvolution(nn.Module):
    """DAGNN-style propagation layer.

    Propagates node features over the graph for K hops, then combines the
    per-hop representations with learned, per-hop retention scores produced
    by a single-output linear gate followed by the configured activation.
    """

    def __init__(self,
                 in_channels,
                 out_channels=1,
                 K=10,
                 use_bias=False,
                 activation=None):
        super().__init__()
        # The gating design requires a scalar score per hop.
        assert out_channels == 1, "'out_channels' must be 1"
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation = get_activation(activation)
        # Linear gate mapping each hop's features to one retention score.
        self.w = nn.Linear(in_channels, out_channels, bias=use_bias)
        self.K = K

    def reset_parameters(self):
        self.w.reset_parameters()

    def forward(self, x, adj):
        # Collect the 0..K hop representations: hops[k] = adj^k @ x.
        hidden = x
        hops = [hidden]
        for _ in range(self.K):
            hidden = torch.spmm(adj, hidden)
            hops.append(hidden)
        # Shape (N, K+1, in_channels): one row of hop features per node.
        stacked = torch.stack(hops, dim=1)
        # Per-hop scalar scores, moved to shape (N, 1, K+1) for the matmul.
        scores = self.activation(self.w(stacked))
        scores = scores.permute(0, 2, 1).contiguous()
        # Weighted sum over hops -> (N, in_channels).
        return (scores @ stacked).squeeze(1)

    def __repr__(self):
        return "{}({} -> {})".format(type(self).__name__,
                                     self.in_channels, self.out_channels)
"cnljt@outlook.com"
] | cnljt@outlook.com |
1ae07b33347f44bf3c60562333c44393cefdf74d | 28d5198225147b8600f95e131019a207ecb4b5f6 | /src/basic_graph_generator/analysis.py | ef2cbf1a8294180f35611470c974cfe0304ad718 | [] | no_license | YIWANG3/Movie-Network-18755 | 17cd52eca8b9280d4bc3e22d9ecfdf5198535ecc | fe355f15653ed224f5d3de95bd14679917f37805 | refs/heads/master | 2023-01-27T20:19:23.994568 | 2020-12-07T19:08:20 | 2020-12-07T19:08:20 | 301,864,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | import pandas as pd
import networkx as nx
# Load the actor-collaboration edge list exported by the generator.
actors_edges = pd.read_csv('actor_edge.csv')
# actors_nodes = pd.read_csv('actor_nodes.csv')
# Build the graph directly from the edge list's Source/Target columns.
# (The previous dead `G = nx.Graph()` assignment was removed:
# from_pandas_edgelist constructs and returns a fresh Graph itself.)
G = nx.from_pandas_edgelist(actors_edges, "Source", "Target")
# Approximate betweenness centrality, sampled over k=1000 pivot nodes.
# NOTE(review): the result is computed but never used below.
bet_centrality = nx.betweenness_centrality(G, k=1000)
# Small-world coefficient sigma (sigma > 1 suggests small-world structure).
sigma = nx.sigma(G)
# BUG FIX: nx.sigma returns a float; concatenating it to a str raised
# TypeError, so the result must be converted with str() first.
print("Small world sigma is " + str(sigma))
"liangkui.liu@west.cmu.edu"
] | liangkui.liu@west.cmu.edu |
1ab873bd85061445bdf41740d3ceb7fe395a363d | 2ca866397a281bdcb6e756b7874c4bb648f6091d | /Firecode.io/Find the Transpose of a Square Matrix/code.py | 729736f70ba724d60f44b57ce833f3eadbfb92be | [
"MIT"
] | permissive | therealmaxkim/Playground | 68588def5477607185e02ef3a0927791b3100e2a | 29c3513464bee07efaac1e41686bf391b52d475d | refs/heads/master | 2020-04-27T15:27:11.501967 | 2019-03-15T04:50:35 | 2019-03-15T04:50:35 | 174,446,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | '''
Questions
1. Can the matrix be empty?
2. By "square matrix", does that mean that the length of x and y length of matrix is the same?
3.
Observations
1. x and y length of matrix are the same
2. transposing is just rotating the matrix 90 degrees clockwise.
3. must be done in place - which means we will have to swap somewhere
4. if we have to swap, what and where would we swap? we would swap the lower element with the element on the right. Think of
moving in two directions - south and east at the same time and swapping.
5. how many times would we have to do this swap? after doing this on the first vertical column, what about the second column?
123
123
123
after swapping the first column
111
223
323
then you would move down (south) 1 and then right (east) 1. repeat same process
111
222
333
steps
1. determine the initial starting place to swapping
2. swap
keep repeating these two steps.
'''
def transpose_matrix(matrix):
    """Transpose a square matrix in place.

    Swaps each element above the main diagonal with its mirror below it,
    so only the upper-triangle index pairs are visited once.  Handles the
    empty matrix and 1x1 matrices trivially.  Returns None (mutates input).
    """
    n = len(matrix)
    for row in range(n):
        for col in range(row + 1, n):
            matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
| [
"noreply@github.com"
] | therealmaxkim.noreply@github.com |
d01b19407f11e44934d9ccff64821499dbe7fd5d | 8c9318f6d8a9ad688cb5129ded786c9045bf17a8 | /Conversion/AutoChangeJson/pylib/e_create_BattleTable.py | 294d1e420d19234f7e6af3d090c3e0610e98f723 | [] | no_license | MiYanLiang/GameCommon | b0ab92fc2f0bc4c0fc35eb783c3882cdcec36b37 | ab72e930bd5a798b5e9e2fa9c697d505603bfad4 | refs/heads/master | 2020-08-01T20:50:01.307345 | 2020-01-30T14:00:40 | 2020-01-30T14:00:40 | 211,109,723 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | #encoding : utf-8
from e_base import *
def export_json(xls, fn):
f = create_file(fn)
if f != None:
reader = xls_reader.XLSReader()
cfgs = reader.GetSheetByIndex(xls, 14, 1)
if cfgs != None:
f.write("{\n")
s = "\t\"BattleTable\": [\n"
for c in cfgs:
ri = RowIndex(len(c))
ss = "\t\t{\n"
ss += "\t\t\t\"id\": \"" + conv_int(c[ri.Next()]) + "\",\n"
ss += "\t\t\t\"battle\": \"" + conv_str_bin(c[ri.Next()]) + "\",\n"
ss += "\t\t\t\"battleIntro\": \"" + conv_str_bin(c[ri.Next()]) + "\",\n"
ss += "\t\t\t\"forceId\": \"" + conv_str_bin(c[ri.Next()]) + "\",\n"
ss += "\t\t\t\"startYear\": \"" + conv_int(c[ri.Next()]) + "\",\n"
ss += "\t\t\t\"cityId\": \"" + conv_str_bin(c[ri.Next()]) + "\",\n"
ss += "\t\t\t\"adventurePrefab\": \"" + conv_str_bin(c[ri.Next()]) + "\",\n"
ss += "\t\t\t\"testPrefab\": \"" + conv_str_bin(c[ri.Next()]) + "\",\n"
ss += "\t\t\t\"battlePrefab\": \"" + conv_str_bin(c[ri.Next()]) + "\"\n"
ss += "\t\t},\n"
s += ss
s = s[:-2]
s += "\n"
s += "\t]\n"
s += "}"
f.write(s)
else:
print('sheed %s get failed.' % 'cfg')
f.close()
def export_bin(xls, fn):
pass | [
"39160433+MiYanLiang@users.noreply.github.com"
] | 39160433+MiYanLiang@users.noreply.github.com |
0180fb50fcc9a71e70b3ccce51b1092d8db51019 | 09ecd5f17ff36896c141db58563de3887d3f627d | /src/accounts/forms.py | ce6f9e63345af563ce7d020d907191aa2146429a | [] | no_license | samirthapa20/tweetme | df9b43bc8be4975343a54cceebba0f259ab6a6dd | 23d77575b85f8f6ff5d8993d3bbbf3898c1e6671 | refs/heads/master | 2021-05-23T15:32:18.136662 | 2020-09-09T14:18:37 | 2020-09-09T14:18:37 | 253,362,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class UserRegisterForm(forms.Form):
    """Signup form: unique username/email plus a matching password pair."""

    username = forms.CharField()
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput)
    password2 = forms.CharField(widget=forms.PasswordInput)

    def clean_password2(self):
        """Reject the confirmation field unless it matches the password."""
        first = self.cleaned_data.get('password')
        confirm = self.cleaned_data.get('password2')
        if first != confirm:
            raise forms.ValidationError('Password must match')
        return confirm

    def clean_username(self):
        """Reject usernames already present (case-insensitive substring check)."""
        username = self.cleaned_data.get('username')
        already_taken = User.objects.filter(username__icontains=username).exists()
        if already_taken:
            raise forms.ValidationError("This username is taken")
        return username

    def clean_email(self):
        """Reject emails already present (case-insensitive substring check)."""
        email = self.cleaned_data.get('email')
        already_taken = User.objects.filter(email__icontains=email).exists()
        if already_taken:
            raise forms.ValidationError("This email is already taken.")
        return email
| [
"samirthapa021@gmail.com"
] | samirthapa021@gmail.com |
3b7127815c49008171e48bd701f138836b011151 | af0d79499d11eb66a8e6be94668dfb1743282e6f | /probemon.py | 6637919e55ddf1d4765b69e149e580e9e2e1c491 | [] | no_license | ericlee123/protecc | 9404f573b430aaea2cd64b4a87b635215d3c046c | 5d1796e1485df47c2a607db34835004ae6a1676c | refs/heads/master | 2021-09-03T20:27:13.139980 | 2018-01-11T19:25:47 | 2018-01-11T19:25:47 | 112,278,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,177 | py | #!/usr/bin/python
import time
import datetime
import argparse
import netaddr
import sys
import logging
from scapy.all import *
from pprint import pprint
from logging.handlers import RotatingFileHandler
NAME = 'probemon'
DESCRIPTION = "a command line tool for logging 802.11 probe request frames"
DEBUG = False
def build_packet_callback(time_fmt, logger, delimiter, mac_info, ssid, rssi):
    """Build and return a scapy sniff callback that logs 802.11 probe requests.

    :param time_fmt: 'iso' for ISO-8601 timestamps, anything else for unix epoch
    :param logger: logging.Logger that receives one delimited line per probe
    :param delimiter: field separator used in the log line
    :param mac_info: include the MAC vendor organization when True
    :param ssid: include the probed SSID when True
    :param rssi: include the received signal strength when True
    """
    def packet_callback(packet):
        if not packet.haslayer(Dot11):
            return
        # We are looking only for management frames (type 0) with a
        # probe-request subtype (0x04); anything else is ignored.
        if packet.type != 0 or packet.subtype != 0x04:
            return
        # List of output fields, assembled in a fixed order.
        fields = []
        # Timestamp: unix epoch seconds by default, ISO format on request.
        log_time = str(int(time.time()))
        if time_fmt == 'iso':
            log_time = datetime.datetime.now().isoformat()
        fields.append(log_time)
        # The probing station's MAC address.
        fields.append(packet.addr2)
        # Look up the vendor organization from the MAC's OUI octets.
        if mac_info:
            try:
                parsed_mac = netaddr.EUI(packet.addr2)
                fields.append(parsed_mac.oui.registration().org)
            # BUG FIX: was Python-2-only "except E, e:" syntax, which is a
            # SyntaxError on Python 3; "as" works on both 2.6+ and 3.
            except netaddr.core.NotRegisteredError:
                fields.append('UNKNOWN')
        # Include the SSID carried in the probe frame.
        if ssid:
            fields.append(packet.info)
        if rssi:
            # NOTE(review): RSSI is read from a fixed offset in the undecoded
            # RadioTap tail; assumes the capture driver places it there --
            # confirm for the interface in use.
            rssi_val = -(256-ord(packet.notdecoded[-4:-3]))
            fields.append(str(rssi_val))
        logger.info(delimiter.join(fields))
    return packet_callback
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-i', '--interface', help="capture interface")
parser.add_argument('-t', '--time', default='iso', help="output time format (unix, iso)")
parser.add_argument('-o', '--output', default='probemon.log', help="logging output location")
parser.add_argument('-b', '--max-bytes', default=5000000, help="maximum log size in bytes before rotating")
parser.add_argument('-c', '--max-backups', default=99999, help="maximum number of log files to keep")
parser.add_argument('-d', '--delimiter', default='\t', help="output field delimiter")
parser.add_argument('-f', '--mac-info', action='store_true', help="include MAC address manufacturer")
parser.add_argument('-s', '--ssid', action='store_true', help="include probe SSID in output")
parser.add_argument('-r', '--rssi', action='store_true', help="include rssi in output")
parser.add_argument('-D', '--debug', action='store_true', help="enable debug output")
parser.add_argument('-l', '--log', action='store_true', help="enable scrolling live view of the logfile")
args = parser.parse_args()
if not args.interface:
print "error: capture interface not given, try --help"
sys.exit(-1)
DEBUG = args.debug
# setup our rotating logger
logger = logging.getLogger(NAME)
logger.setLevel(logging.INFO)
handler = RotatingFileHandler(args.output, maxBytes=args.max_bytes, backupCount=args.max_backups)
logger.addHandler(handler)
if args.log:
logger.addHandler(logging.StreamHandler(sys.stdout))
built_packet_cb = build_packet_callback(args.time, logger,
args.delimiter, args.mac_info, args.ssid, args.rssi)
sniff(iface=args.interface, prn=built_packet_cb, store=0)
if __name__ == '__main__':
main()
| [
"ericlee123@gmail.com"
] | ericlee123@gmail.com |
fc83b5353e23b602a769fcad442dc78a846bd780 | 0a4da495a82d50499c62da53b2b14b3f00435930 | /bachelor's degree thesis/code/undirected/training and validation/MergeVectorsDicts.py | ed0e4018f0e2fff281c0ed992865b2426bbcbe4c | [] | no_license | honkaLab/LinkPrediction | b45b87c48dce01182716e30d12d8ef21165a0376 | 56ae9eee3c976cf156b753d1f61aa559f20a521b | refs/heads/master | 2021-01-21T09:33:57.745217 | 2015-04-23T12:37:21 | 2015-04-23T12:37:21 | 34,158,007 | 0 | 0 | null | 2015-04-18T08:12:01 | 2015-04-18T08:12:00 | null | UTF-8 | Python | false | false | 867 | py |
import InitialData
import pickle
# ###merge 9####
# #filesNames, fullFilesNames = InitialData.FileWalker("./temp data/Vectors1970_1985/9")
# vectorsDict = {}
# for fullFileName in fullFilesNames:
# vectorDictFile = open(fullFileName)
# vectorDict = pickle.load(vectorDictFile)
# for i in vectorDict:
# if i not in vectorsDict:
# vectorsDict[i] = {}
# vectorsDict[i].update(vectorDict[i])
# vectorsDictFile = open("./temp data/Vectors1970_1985/Vectors_9", "w")
# pickle.dump(vectorsDict, vectorsDictFile)
vectorsDict = {}
filesNames, fullFilesNames = InitialData.FileWalker("./temp data/Vectors1970_1985_bak")
for fullFileName in fullFilesNames:
print fullFileName
vectorDictFile = open(fullFileName)
vectorsDict.update(pickle.load(vectorDictFile))
vectorsDictFile = open("./temp data/Vectors1970_1985", "w")
pickle.dump(vectorsDict, vectorsDictFile)
| [
"khuanghonka@gmail.com"
] | khuanghonka@gmail.com |
50e19834c25fc3ed009d2f1c8b61258d6d2a5c71 | 2b11437bdb92aff2e5d9298024ffe0d14bf29b2a | /Support/Python/run.py | 43c9b7409b5331c534100ba92f46363d7fdda021 | [] | no_license | LibreCrops/cdef | a1f15bec1354fc9a620cf1045ad337d0d55be169 | 6d7c4ea84f5312710075a27a26acee578a226e9c | refs/heads/master | 2020-04-09T21:25:17.329401 | 2018-02-03T05:35:44 | 2018-02-03T05:35:44 | 42,525,476 | 17 | 5 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | #!/usr/bin/env python3
import sys
from pdbxml.session import Session
s = Session()
for i in sys.argv[1:]:
s.load(i)
s.write_header()
| [
"rectigu@gmail.com"
] | rectigu@gmail.com |
7fb4f71a9ccc64dc5c65d6bf095c6e49af56ef7a | 6820e74ec72ed67f6b84a071cef9cfbc9830ad74 | /plans/migrations/0008_auto_20150401_2155.py | dbca0bd39f59f421e402b58652c15b1cbd599a57 | [
"MIT"
] | permissive | AppforallHQ/f5 | 96c15eaac3d7acc64e48d6741f26d78c9ef0d8cd | 0a85a5516e15d278ce30d1f7f339398831974154 | refs/heads/master | 2020-06-30T17:00:46.646867 | 2016-11-21T11:41:59 | 2016-11-21T11:41:59 | 74,357,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import plans.models
class Migration(migrations.Migration):
dependencies = [
('plans', '0007_auto_20150330_0046'),
]
operations = [
migrations.CreateModel(
name='ItemInvoice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('amount', models.IntegerField(validators=[plans.models.neg_validator])),
('plan_amount', models.IntegerField(validators=[plans.models.neg_validator])),
('created_at', models.DateTimeField(auto_now_add=True)),
('paid', models.BooleanField(default=False)),
('pay_time', models.DateTimeField(null=True, blank=True)),
('invalid', models.BooleanField(default=False)),
('metadata', jsonfield.fields.JSONField()),
('generated_promo_code', models.ForeignKey(related_name='+', to='plans.PromoCode')),
('plan', models.ForeignKey(to='plans.Plan', null=True)),
('promo_code', models.ForeignKey(default=None, blank=True, to='plans.PromoCode', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='giftinvoice',
name='generated_promo_code',
),
migrations.RemoveField(
model_name='giftinvoice',
name='plan',
),
migrations.RemoveField(
model_name='giftinvoice',
name='promo_code',
),
migrations.DeleteModel(
name='GiftInvoice',
),
]
| [
"hi@appforall.ir"
] | hi@appforall.ir |
daa48e468031bfb45e4ab8ba1f2c4bb486c569e9 | 23efd02cdfd781df6b7bd8f7c168094ca78621f4 | /tf_object_detection/Object_detection_video.py | 45dde01a4c93f123988187d0df85cd2235235188 | [] | no_license | chrisharrisengineer/blood_machine | ed15ae9031d537f571b0046d762349bb53c1b9c7 | 62a7d63de87f2563b5e3f4521cc5b7b5f7475ca1 | refs/heads/master | 2020-09-03T19:45:58.534976 | 2020-02-27T22:11:59 | 2020-02-27T22:11:59 | 219,550,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,888 | py | ######## Video Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 1/16/18
# Description:
# This program uses a TensorFlow-trained classifier to perform object detection.
# It loads the classifier uses it to perform object detection on a video.
# It draws boxes and scores around the objects of interest in each frame
# of the video.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
import time
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
VIDEO_NAME = 'video_12_fps_Trim_Trim.avi'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'images_for_training_and_testing','labelmap.pbtxt')
# Path to video
PATH_TO_VIDEO = os.path.join(CWD_PATH,VIDEO_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 2
count = 0
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Open video file
video = cv2.VideoCapture(PATH_TO_VIDEO)
while(video.isOpened()):
start_time = time.time()
# Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
ret, frame = video.read()
frame_expanded = np.expand_dims(frame, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
# Draw the results of the detection (aka 'visulaize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.60)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
count += 1
print('iteration %d: %.3f sec'%(count, time.time()-start_time))
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
video.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | chrisharrisengineer.noreply@github.com |
efae6ca9511c27c47979a9cd69c00bb20507e76c | 68ba4b270dd0e781395a0f2da8bc2ffd46694e4e | /dashboard/migrations/0008_remove_bookings_end_date.py | 058ca4e64684db00593bb18111f9d11b75a94d3e | [] | no_license | KENNOH/dhabiti-Constructors | 3a19e876fd6c099f9eb1278b183c1ea32c6c095c | 16c33641143e4a1f0c7736573ea95881334fffb8 | refs/heads/master | 2020-12-18T11:21:08.693199 | 2020-03-10T08:13:43 | 2020-03-10T08:13:43 | 235,361,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | # Generated by Django 3.0.2 on 2020-02-17 12:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0007_bookings'),
]
operations = [
migrations.RemoveField(
model_name='bookings',
name='end_date',
),
]
| [
"Kennoh Muriithi"
] | Kennoh Muriithi |
dfe8824b3c6790359bf3a97fe707d836c42659c1 | d44cf6869d8285546862b224c9c12ec07bb4becb | /python/PE025.py | 90cd48cd4175380836b4faaacd9297c947e0a254 | [] | no_license | jaehaaheaj/ProjectEuler | 3c25ab0abb9bf11bd9a4f0f68271f0fad8978523 | 6d5b1f8d4c3ecf9cbeca1aaf9eca2d6fcdf912c1 | refs/heads/master | 2021-01-19T20:50:31.155252 | 2018-10-07T14:03:45 | 2018-10-07T14:03:45 | 88,565,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | index = 2
(f1, f2) = (1, 1)
target = 1000
while(len(str(f2))<target):
(f1, f2) = (f2, f1+f2)
index+=1
print(index) | [
"jaehaaheaj@gmail.com"
] | jaehaaheaj@gmail.com |
05732ad356ad1ac90bd6b8ad5e17dd86b79f84a3 | 8daf711629d2895e9bccd2216356cc6584485d91 | /venv/bin/f2py3 | 1ef32b5ed12c2d914e8c0b2a43a3f4a6d5dcd567 | [] | no_license | Jolium/OpenCV_Python | 518c6fc85c03b04b3a328c4ea373ae7db6ad3642 | eac213e6333e8538fece21a9fe17752b9fd9060f | refs/heads/master | 2022-09-25T17:41:40.904882 | 2020-06-04T17:57:31 | 2020-06-04T17:57:31 | 269,428,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/home/joao/PycharmProjects/OpenCV_Python/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"joao@example.com"
] | joao@example.com | |
04bb85ca1bdd439c34b6adc124e725772daf1bad | d22a2fbb9adb82644c5665242661bad172550552 | /venv/Scripts/easy_install-3.7-script.py | e05bf4e5e8b133bdefcaf94f129dcc7bb1a6c3db | [] | no_license | felipemanfrin/Python-Zero-ao-Zeno | e98ba3e4b974e88801b8bc947f461b125bc665b8 | d6d08aa17071f77170bbd105452b0d05586131c8 | refs/heads/master | 2022-07-29T19:38:41.729178 | 2020-05-25T01:02:18 | 2020-05-25T01:02:18 | 265,356,280 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | #!C:\Users\Felipe\PycharmProjects\666\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"felipemanfrin@gmail.com"
] | felipemanfrin@gmail.com |
16093e48e8ea6d2c734900b39e59e698fffa2edc | 29bec83fc600720533ad2bcf17fc90cd9ca385b7 | /0x06-python-classes/prueba_OOP.py | 19e79b242fb1d7fd13ef39f02007e7cc9e743a28 | [] | no_license | VictorZ94/holbertonschool-higher_level_programming | 73a7f504cde583f43f641e18e692e062610870a4 | ad512a1c76dc9b4c999a0ba2922c79f56206dd98 | refs/heads/master | 2023-03-25T04:38:12.708766 | 2021-03-24T01:08:47 | 2021-03-24T01:08:47 | 291,826,914 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!/usr/bin/python3
class coche():
largochasis = 250
anchochasis = 120
ruedas = 4
enmarcha = False
def arrancar(self):
self.enmarcha=True
def estado(self):
if (self.enmarcha):
return "El coche está en marcha"
else:
return "El coche está parado"
micoche=coche()
print(micoche.largochasis)
print(micoche.ruedas)
# micoche.arrancar()
(print(micoche.estado())) | [
"zrvictor@outlook.com"
] | zrvictor@outlook.com |
e481e895d5ff00e83038d323d64c822c1dabc1f8 | b84ed844e81757a27b00d2ac2a471617f130abff | /studyNotes/grammar/tuple-test.py | ad807912946e042a08d8a2092b196d294027b220 | [] | no_license | hurtnotbad/pythonStudy | 530e353dd7b3c6e02e5fd63bada95dd2442f94a6 | 19062fc006fac6b9f9cc638fcedad8bacca8433f | refs/heads/master | 2020-03-19T11:35:20.509148 | 2018-07-24T15:20:34 | 2018-07-24T15:20:34 | 136,463,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | """
元组(tuple):和数组很类似,区别在于:
1、元组的数据不能修改(类似于java枚举)
2、定义的时候用() 而列表用[]
例子:
info_tuple = ("ZhangSan", 18, 1.75)
使用场景:
1、 函数的 参数 或 返回值,可任意传入和返回多个数据
2、格式化字符串
3、让列表不可以修改,类似枚举
"""
def tuple_test(test):
if not test:
return
info_tuple = ("ZhangSan", 18, 1.75)
print(type(info_tuple))
print(info_tuple[2])
# 空元组
empty_tuple = ()
print(type(empty_tuple))
# 单元组
single_tuple = (5,) # 定义成(5) 则解释器认为整型了
# index 18第一次出现的索引
index = info_tuple.index(18)
# 统计出现的次数
number = info_tuple.count(1.75)
print("18出现的索引值为: %d" % index)
print(number)
# 遍历
for tuple_member in info_tuple:
print(tuple_member)
# 格式化字符串,后面的"( )" 本质上就是元组
print("%s 年龄是 %d 身高是 %.2f" % ("ZhangSan", 18, 1.75))
print("%s 年龄是 %d 身高是 %.2f" % info_tuple)
tuple_test(False)
def list_tuple_convert(test):
if not test:
return
num_list = [1, 2, 3, 4]
# list to tuple 数据能修改变为不可修改,保护数据
num_tuple = tuple(num_list)
print(type(num_tuple))
# tuple to list 数据从不能修改表为能
num_list2 = list(num_tuple)
print(type(num_list2))
list_tuple_convert(False)
| [
"1181942512@qq.com"
] | 1181942512@qq.com |
2c6be602e4b5ffaada4af8308a8ad2a9193bfc78 | 382cb993cd97d8b4b59261b4179b76532678190a | /WindowsApplication/11Sep19_WinAppGrid.py | 70144ce529b686243b97136e3819351d02c60ae0 | [] | no_license | skumardotpython/JulyBatch | 4d0dd315a3532aa6c2970b64f6eede3cb1286138 | 42623fa58360b087fcca552013a6929339d9358f | refs/heads/master | 2022-10-25T12:05:12.476477 | 2019-09-13T03:00:23 | 2019-09-13T03:00:23 | 198,754,742 | 0 | 1 | null | 2022-10-03T15:21:13 | 2019-07-25T04:03:07 | Python | UTF-8 | Python | false | false | 315 | py | # !/usr/bin/python3
from tkinter import *
root = Tk( )
def Calculalte():
pass
e1 = Entry(root)
e1.pack
var = 0
for r in range(3):
for c in range(3):
var = var + 1
print(c, end=' ')
Button(root, border=1, text=str(var), command=Calculalte).grid(row=r, column=c)
root.mainloop() | [
"skumard30@yahoo.co.in"
] | skumard30@yahoo.co.in |
611712fc838ebdb37c0fba3d0011a518d63cb5ca | 48f3d3a9eedb747234aa32a32128679c7cd4cd5a | /fitenv/bin/pyrsa-sign | 74c5ac59d6436e90c392c90c45a4f6ca683712e0 | [] | no_license | WhiteLedAnn/MGPUFitTrack | 2352a74d546d65ad60ed28994e7446cb12eabdb8 | db44032ba3022c57530da8871b5b058e830941e7 | refs/heads/master | 2021-05-18T14:53:48.211561 | 2020-09-29T13:19:45 | 2020-09-29T13:19:45 | 251,286,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | #!/home/wl/Projects/MGPUFitTrack/fitenv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import sign
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(sign())
| [
"nonameyetithink@mail.ru"
] | nonameyetithink@mail.ru | |
24a052014b0230933cb2040245f58f4c8c21cb35 | 1378937ff05a4353c71df49fea75f04bc1f70fc9 | /tests/test_shrink.py | f4eb7d61b06177d3802cb5f885f809ede71c5e4a | [
"BSD-3-Clause"
] | permissive | h-bryant/funcsim | a37c15aca88a17ac3c02f961db106393dc4b5f39 | 6f0ec2365e3ed6d9478e2f92e755cebafaf6528d | refs/heads/master | 2022-12-22T18:42:07.474869 | 2020-09-25T14:55:14 | 2020-09-25T14:55:14 | 113,094,901 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | import numpy as np
import sys
import os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"../"))
import shrinkage
def test_shrink():
targets = [shrinkage.target_a, shrinkage.target_b,
shrinkage.target_c, shrinkage.target_d,
shrinkage.target_f]
def calc_all_norms(seed, mu, r, n):
np.random.seed(seed)
x = np.random.multivariate_normal(mu, r, size=n)
return np.array([np.linalg.norm(tgt(x)[0] - r) for tgt in targets])
def test_0():
mu = np.array([10.0, 5.0, 0.0])
rho = np.array([
[1, 0.9, 0.9],
[0.9, 1.0, 0.9],
[0.9, 0.9, 1.0]])
variances = ([
[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 5.0]])
r = np.dot(np.dot(variances, rho), variances)
all_norms = calc_all_norms(seed=1, mu=mu, r=r, n=20)
assert abs(sum(all_norms) - 27.5470609894) < 0.01
| [
"h-bryant@tamu.edu"
] | h-bryant@tamu.edu |
44063120d777dd88864c0a6de49ff0d427fb3fb0 | f29c36c5795fae33bf9b1d32c6694331a6531bb5 | /w13_Main.py | ec8a70682a106a532369e14348570e93b8508ba2 | [] | no_license | cbg293/p2_201611115 | c366c0af838e566e225f8eb71b38ea49aff7239c | e3bdcfc3981d28059d2438e5a33cc2ed04652d9a | refs/heads/master | 2021-01-21T14:12:33.378036 | 2016-06-04T09:02:53 | 2016-06-04T09:02:53 | 54,975,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py |
# coding: utf-8
# In[1]:
def File():
try:
fin1=open('python.txt','a')
fin2=open('outputNum.txt','r')
for line in fin2:
fin1.write(line)
fin1.close()
fin2.close()
except Exception as e:
print e
# In[2]:
import turtle
wn=turtle.Screen()
t1=turtle.Turtle()
def getCoordsFromFile():
fout=open('reccoords.txt')
mycoords=[]
for line in fout:
line1=line.split(',')
mycoords.append([(line1[0],line1[1]),(line1[2],line1[3].strip())])
for coord in mycoords:
x1=int(coord[0][0])
x2=int(coord[1][0])
y1=int(coord[0][1])
y2=int(coord[1][1])
coordlist=((x1,y1),(x2,y1),(x2,y2),(x1,y2),(x1,y1))
t1.penup()
t1.setpos(coordlist[0])
t1.pendown()
for c in coordlist:
t1.goto(c)
fout.close()
# In[ ]:
def lab1():
File()
def lab2():
getCoordsFromFile()
wn.exitonclick()
def main():
lab1()
lab2()
if __name__=="__main__":
main()
| [
"cbg293@naver.com"
] | cbg293@naver.com |
0961a0b1331e9e7e09b2a6a22ba077513ecbaf9f | e53502f629fb04445d08a05273307933434a6791 | /app/models.py | a102a210317707a6ee2adac1978427f21c7cdde7 | [] | no_license | kamilsamaj/microblog | e0834135a4ecc140c2ac0bd5f6d9b9ec2475f693 | f60187c5dd526f9b3b37e0b89edcfd80c77d9d24 | refs/heads/master | 2020-04-17T14:54:42.669557 | 2019-02-18T21:41:55 | 2019-02-18T21:41:55 | 166,677,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,614 | py | from app import db, login
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from hashlib import md5
# followers association table
followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(128), index=True, unique=True)
password_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='author', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime, default=datetime.utcnow)
followed = db.relationship(
'User', secondary=followers,
primaryjoin=(followers.c.follower_id == id),
secondaryjoin=(followers.c.followed_id == id),
backref=db.backref('followers', lazy='dynamic'), lazy='dynamic')
def __repr__(self):
return '<User {}>'.format(self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def avatar(self, size=128):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(digest, size)
def follow(self, user):
if not self.is_following(user):
self.followed.append(user)
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)
def is_following(self, user):
return self.followed.filter(
followers.c.followed_id == user.id).count() > 0
def followed_posts(self):
return Post.query.join(
followers, (followers.c.followed_id == Post.user_id).filter(
followers.c.follower_id == self.id).order_by(
Post.timestamp.desc()
)
)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post> {}>'.format(self.body)
@login.user_loader
def load_user(id):
return User.query.get(int(id))
| [
"kamil.samaj@accolade.com"
] | kamil.samaj@accolade.com |
41783475f6c5ea8a57e1088072cd1d5ac561a09d | 201c7cc46313bc00ac43be72dcfc83cd8ea579f2 | /python/ray/experimental/workflow/tests/conftest.py | 3635aa02a7f8045f1231f05401fde1229984348a | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | PAL-ML/ray | a975b07a1b18825fab87d7a5aa8690a5e2d50ff4 | 6d884f1442c271329829ff0bceae588f3cd49d7d | refs/heads/master | 2023-06-23T12:00:41.013114 | 2021-07-22T14:59:50 | 2021-07-22T14:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | import boto3
import pytest
from moto import mock_s3
from mock_server import * # noqa
from ray.experimental.workflow import storage
@pytest.fixture(scope="function")
def filesystem_storage():
# TODO: use tmp path once fixed the path issues
storage.set_global_storage(
storage.create_storage("/tmp/ray/workflow_data/"))
yield storage.get_global_storage()
@pytest.fixture(scope="function")
def aws_credentials():
import os
old_env = os.environ
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
yield
os.environ = old_env
@pytest.fixture(scope="function")
def s3_storage(aws_credentials, s3_server):
with mock_s3():
client = boto3.client(
"s3", region_name="us-west-2", endpoint_url=s3_server)
client.create_bucket(Bucket="test_bucket")
url = ("s3://test_bucket/workflow"
f"?region_name=us-west-2&endpoint_url={s3_server}")
storage.set_global_storage(storage.create_storage(url))
yield storage.get_global_storage()
| [
"noreply@github.com"
] | PAL-ML.noreply@github.com |
d77adb64bb20f92e58ed5875f77951bd2db36b07 | c76c7a6b46dbe5e7d650307687743928724e49f9 | /setup.py | 3ba968b00cf7fd7f439f5de147f05c9cfee2fecd | [] | no_license | AGhost-7/snipsnip | fcbda1dce60421764bbd29c36519eef634a63404 | 802d2875f9c16f4c13574fac4050cd7c934de290 | refs/heads/master | 2021-04-27T12:29:14.606943 | 2019-06-06T23:45:27 | 2019-06-06T23:45:27 | 122,420,256 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from setuptools import setup
setup(
name='snipsnip',
version='0.8',
description='Remote mac clipboard',
url='http://github.com/AGhost-7/snipsnip',
author='Jonathan Boudreau',
author_email='jonathan.boudreau.92@gmail.com',
license='MIT',
keywords=['clipboard'],
zip_safe=False,
install_requires=[
'pyperclip >= 1.7.0',
'python-xlib >= 0.25;platform_system=="Linux"'
],
entry_points={
'console_scripts': ['snipsnip=snipsnip:main']
},
)
| [
"jonathan.boudreau.92@gmail.com"
] | jonathan.boudreau.92@gmail.com |
4dac38843a9e85e0789d5d2c2c36cb7ac02e1f60 | 02839adf12b107962f44ac08f52448d261c59fcf | /modules/logic/wires_complicated.py | 2aa8b43f051e6eaa77603b28855a3f994f5ba77b | [
"MIT"
] | permissive | AngelOnFira/keep-botting-and-no-one-explodes | cd34416e36a231c808d4b18af986e4a1c93060df | 1d096de714dabe9e40c66cdec364101957caa1ca | refs/heads/master | 2022-04-28T05:35:12.508830 | 2020-04-30T19:21:49 | 2020-04-30T19:21:49 | 207,931,287 | 0 | 0 | MIT | 2020-04-30T19:21:50 | 2019-09-12T00:32:14 | Python | UTF-8 | Python | false | false | 1,693 | py | # primary attributes: colors
# secondary attributes: star, led
# tertiary attributes: 2+ batt, parallel, sn even
has_two_plus_batt = lambda **kwargs : kwargs['two_plus_batt']
has_parallel = lambda **kwargs : kwargs['parallel']
is_sn_even = lambda **kwargs : kwargs['sn_even']
is_true = lambda **kwargs : True
is_false = lambda **kwargs : False
rules = {
'white': {
'none': is_true,
'star': is_true,
'led': is_false,
'both': has_two_plus_batt,
},
'red': {
'none': is_sn_even,
'star': is_true,
'led': has_two_plus_batt,
'both': has_two_plus_batt,
},
'blue': {
'none': is_sn_even,
'star': is_false,
'led': has_parallel,
'both': has_parallel,
},
'both': {
'none': is_sn_even,
'star': has_parallel,
'led': is_sn_even,
'both': is_false,
},
}
class WiresComplicated:
def solution(self, colors, has_star, has_led, battery_count, has_parallel, serial_number):
if 'red' in colors and 'blue' in colors:
primary = 'both'
elif 'red' in colors:
primary = 'red'
elif 'blue' in colors:
primary = 'blue'
else:
primary = 'white'
if has_star and has_led:
secondary = 'both'
elif has_star and not has_led:
secondary = 'star'
elif not has_star and has_led:
secondary = 'led'
else:
secondary = 'none'
validator = rules[primary][secondary]
return validator(
two_plus_batt=battery_count >= 2,
parallel=has_parallel,
sn_even=int(serial_number[-1:]) % 2 == 0) | [
"wizcardforest@gmail.com"
] | wizcardforest@gmail.com |
b04357c78c9053445018acd984f13ce64032f783 | d5ea3aa5a9e342ff7b21ea13599c4475e892c003 | /wheresit/apps/items/migrations/0001_initial.py | ccc34c09c9b4e3edf1e1669e75b8fc1e228db475 | [] | no_license | matthewstevens/wheresit | 5e1fabae6eee5297ef4533aa2182db5d940ad667 | 4257ef3b13f0bd49ee5e125fb4680882fb4cdb6b | refs/heads/master | 2021-01-01T15:55:18.439608 | 2012-07-11T13:55:42 | 2012-07-11T13:55:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Item'
db.create_table('items_item', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('item_type', self.gf('django.db.models.fields.CharField')(max_length=200)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('meta_tags', self.gf('django.db.models.fields.CharField')(max_length=300)),
))
db.send_create_signal('items', ['Item'])
# Adding model 'BorrowedItem'
db.create_table('items_borroweditem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('borrower', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Person'])),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(related_name='owner', to=orm['profiles.Person'])),
('item', self.gf('django.db.models.fields.related.ForeignKey')(related_name='item', to=orm['items.Item'])),
))
db.send_create_signal('items', ['BorrowedItem'])
def backwards(self, orm):
# Deleting model 'Item'
db.delete_table('items_item')
# Deleting model 'BorrowedItem'
db.delete_table('items_borroweditem')
models = {
'items.borroweditem': {
'Meta': {'object_name': 'BorrowedItem'},
'borrower': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Person']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'item'", 'to': "orm['items.Item']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': "orm['profiles.Person']"})
},
'items.item': {
'Meta': {'object_name': 'Item'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_type': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'meta_tags': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profiles.person': {
'Meta': {'object_name': 'Person'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['items'] | [
"matthew.stevens@gmail.com"
] | matthew.stevens@gmail.com |
57faab3f28289f33f49f08a7a6f053607e1d1ba1 | 7ffcf7db715030ba0828ca97985c21dcb05b9750 | /app.py | a60fde5540cf32a70c372e6661edb08389995e01 | [] | no_license | 54microlong/USCholar | 98a35522708b2d89c70808ef7e2f1222a5e618a7 | 9c70e019faa2b175d8aaf7684984453baa0fa7bb | refs/heads/master | 2021-01-01T17:22:38.833194 | 2015-02-25T07:36:12 | 2015-02-25T07:36:12 | 31,302,218 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | #coding:utf-8
import os
from USCholar import uscApp
## when deploy to heroku please comment this line
from config import *
if __name__ == "__main__":
uscApp.config.from_object(__name__)
# [Heroku]load DB environment from env
#app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
uscApp.run(port=PORT,host=HOST)
| [
"chenlong888@gmail.com"
] | chenlong888@gmail.com |
567713e54a8b7ca1a7d471366010f3aa3eb7088e | 728246fec461917951f5120043684ab1c69e597a | /Nand2tetris/07/vm_translator.py | caedf0e175b09d678b8e8cb37868261287e5c989 | [] | no_license | cxphoe/Introduction | ef7ea43c2b0f24a682c32e31c6abaf19f1fba01b | 5e27cb58aedaa2fab0ebe84d44240585d1d76a02 | refs/heads/master | 2021-09-11T00:54:57.536229 | 2018-04-05T05:34:57 | 2018-04-05T05:34:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,708 | py | """The vm_translator module provides translate function to translate vm language
to hack language in another file."""
import os
from Parser import Parser
from CodeWriter import CodeWriter
from vm_definitions import *
def is_vm_file(filename):
return filename.endswith('.vm')
def retrive_files(path):
has_sys_init = False
filepaths = []
dirpath, ext = os.path.splitext(path)
if ext != '':
if ext != '.vm':
raise ValueError("Except a vm file.")
filepaths.append(path)
outpath = dirpath + '.asm'
else:
outpath = os.path.join(path, os.path.basename(path) + '.asm')
filenames = filter(is_vm_file, os.listdir(path))
for name in filenames:
filepaths.append(os.path.join(path, name))
if name == 'Sys.vm':
has_sys_init = True
return has_sys_init, filepaths, outpath
def translate(path):
"""translate vm language in a vm file or files in the path to asm language
in other files.
path: dirpath or filepath
"""
has_sys_init, filepaths, outpath = retrive_files(path)
writer = CodeWriter(outpath)
if has_sys_init:
writer.write_sys_init()
for filepath in filepaths:
filename = os.path.basename(filepath)
writer.set_file_name(filename)
parser = Parser(filepath)
translate_file(parser, writer)
writer.close()
def translate_file(parser, writer):
"""translate vm language in a file to asm language in another file:
parser: object that store parsed vm commands from a file
writer: object that translate vm commands and write it into a file
"""
while parser.has_more_commands():
parser.advance()
command = parser.command_type
if command == C_ARITHMETIC:
writer.write_arithmetic(parser.arg1)
elif command == C_PUSH or command == C_POP:
writer.write_push_pop(command, parser.arg1, parser.arg2)
elif command == C_LABEL:
writer.write_label(parser.arg1)
elif command == C_GOTO:
writer.write_goto(parser.arg1)
elif command == C_IF:
writer.write_if(parser.arg1)
elif command == C_FUNCTION:
writer.write_function(parser.arg1, parser.arg2)
elif command == C_RETURN:
writer.write_return()
elif command == C_CALL:
writer.write_call(parser.arg1, parser.arg2)
def main():
translate('StackArithmetic\SimpleAdd')
translate('StackArithmetic\StackTest')
translate('MemoryAccess\StaticTest')
translate('MemoryAccess\BasicTest')
translate('MemoryAccess\PointerTest')
if __name__ == '__main__':
main() | [
"562818991@qq.com"
] | 562818991@qq.com |
f291b36dfe1dc4721cca244d642a6d64163db8bb | 4553718880fe8ab3af86fba4a4fe6b410f438061 | /siteprofile/__init__.py | 22a03455a9e93c6c8debcd8b9192386fc94097b7 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pombredanne/fiee-dorsale | 8b8d565faebd79837a1eddef3472d08e3e7fbd69 | bca22cb5004643ad1919e126871c558c827dd0b6 | refs/heads/master | 2021-01-23T21:53:56.434152 | 2012-04-23T19:44:25 | 2012-04-23T19:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
verbose_name = _(u'Site Profile')
verbose_name_plural = _(u'Site Profiles')
| [
"hraban@fiee.net"
] | hraban@fiee.net |
be247b21442bf84509ccf1b97ee94c774aba4625 | e42e1973b5f15a4ddd45cc0edc2e4f0e23575e71 | /0531.py | fbc134431d4e10b076ffcd2f1d2a5b4bcf0d53f3 | [] | no_license | ChuckData/Coding_the_Matrix | 4cd7a4bc7de1b50fbbaa55d247097e95beeaf593 | eda969dd943cadd2abac365c71e8bc3e521a509b | refs/heads/master | 2020-04-02T06:32:16.659249 | 2016-07-18T21:11:27 | 2016-07-18T21:11:27 | 62,667,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | L = ['A','B','C']
keylist = ['a','b','c']
list2dict = {k:l for k, l in zip(keylist, L)}
print(list2dict) | [
"noreply@github.com"
] | ChuckData.noreply@github.com |
5dc5a139e36ad7492c36c661704cc142e8eb14e8 | 68d685257d353c84cc55de21dd0606f65c798484 | /crawler/crawlers/rieltor_ua/locators/flats_list_locators.py | 3d3a30e786fb00f52f5ac72b559b5c3ef819bb40 | [] | no_license | Justk1dd1ng/Estify | 310229c8cd93e1ff790e175e4ba3092d035bebec | 8164ff460d9175d2ed7771d9d71620b1c9ab4ec1 | refs/heads/master | 2023-06-30T05:22:57.236460 | 2021-08-03T11:53:30 | 2021-08-03T11:53:30 | 384,156,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from crawlers.abstract import FlatsListLocators
class RieltorFlatsListLocators(FlatsListLocators):
FLAT = 'div.catalog-item'
| [
"tantiarella@gmail.com"
] | tantiarella@gmail.com |
4ea263d263d66803cb0cd23c94ec021162f2117d | 329c20b232f562e63d19318adcde483cc14d3afb | /03_poo_python/exercice/06_ex_heritage_animalerie/Exo06_Refuge/program.py | 04828ca2cae44e13ec671a0d3d1e3e5822d0e24e | [] | no_license | Nicolas1936/bstorm_ai | 459103f2f94e1774a055d9582990d23008e83211 | 7fcc473d7200fd5abaa20e833ee406b734e70f6f | refs/heads/master | 2023-06-20T02:56:30.592184 | 2021-07-17T05:49:11 | 2021-07-17T05:49:11 | 363,119,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import datetime
from models.refuge import Refuge
from models.chat import Chat
from models.chien import Chien
r = Refuge("AnimalBel", 20, "Rue de bruxelles", 42, "Namur", 5000)
le_chat_1 = Chat("LeChat", 5, 0.3, "F", datetime.date(2017, 1, 1), True, "Calin")
le_chat_2 = Chat("Garfield", 10, 0.7, "M", datetime.date(2015, 2, 28), False, "Dormeur")
le_chien_1 = Chien("Didier", 40, 1, None, datetime.date(2012, 2, 3), "Rose", "Labrador", False)
r.ajouter_animal(le_chat_1)
r.ajouter_animal(le_chat_2)
r.ajouter_animal(le_chien_1)
def demande_animal():
choix = int(input("- Choix animal (1 - Chat, 2 - Chien) :"))
print()
nom = input("Nom : ")
poids = float(input("Poids : "))
taille = float(input("Taille : "))
sexe = input("Sexe (M/F) : ")
date_query = input("Date de naissance (YYYY-MM-DD) : ")
date_naissance = datetime.date.fromisoformat(date_query)
if choix == 1:
caractere = input("Caractere : ")
type_poils = input("Type de poils (Long/Court)")
poils_long = type_poils.upper() == "LONG"
animal = Chat(nom, poids, taille, sexe, date_naissance, poils_long, caractere)
else:
collier = input("Couleur collier : ")
race = input("Race : ")
dressage = input("Est dressé (Oui/Non): ")
est_dresse = dressage.upper() == "OUI"
animal = Chien(nom, poids, taille, sexe, date_naissance,collier, race, est_dresse)
return animal
while(r.nb_animaux > 0):
# Demande nouveau arrivant
nbr_text = input("Nombre d'arrivant (entrer pour passer): ")
if nbr_text.isdigit():
nbr = int(nbr_text)
while nbr > 0:
animal = demande_animal()
r.ajouter_animal(animal)
nbr -= 1
print()
#Simulation
r.simuler_journee()
print("**********************************************")
| [
"blanchard.nicolas@hotmail.com"
] | blanchard.nicolas@hotmail.com |
87d2f76d0c0ee7029fec4859b4f68ebf14088ef2 | 8c931730efec158421dc4f1bf70e273d7e8dfc51 | /code/tflearn/tflearn/estimators/base.py | 919c0ff541ef9de04057663876e298ac385290bd | [
"MIT"
] | permissive | SunnyMarkLiu/FRU | 69bb74b2b35566629a65a74cc6214121ffbd8bc4 | 010b3994f2758a7d06dec39061fce357cda1a10d | refs/heads/master | 2020-03-23T19:22:18.496073 | 2018-07-19T21:37:12 | 2018-07-19T21:37:12 | 141,972,722 | 1 | 0 | MIT | 2018-07-23T06:42:55 | 2018-07-23T06:42:55 | null | UTF-8 | Python | false | false | 4,770 | py | from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
from tensorflow.python.ops import resources
from ..utils import validate_func
class GraphBranch(object):
""" A graph branch class used for building part of an Estimator graph.
"""
def __init__(self, input_tensor=None, output_tensor=None, params=None):
self.input_tensor = input_tensor
self.output_tensor = output_tensor
self.params = params if params is not None else dict()
self._is_ready = False
if input_tensor is not None and output_tensor is not None:
self._is_ready = True
def build(self, input_tensor, output_tensor, params=None):
self.input_tensor = input_tensor
self.output_tensor = output_tensor
self.params = params if params is not None else dict()
self._is_ready = True
@property
def is_ready(self):
return self._is_ready
def get_params(self, x):
if x in self.params.keys():
return self.params[x]
else:
return None
class BaseEstimator(object):
""" Estimators Graph is only build when fit/predict or evaluate is called.
"""
def __init__(self, metric=None, log_dir='/tmp/tflearn_logs/',
global_step=None, session=None, graph=None, name=None):
self.name = name
# Estimator Graph and Session
self.graph = tf.Graph() if graph is None else graph
with self.graph.as_default():
conf = tf.ConfigProto(allow_soft_placement=True)
self.session = tf.Session(config=conf) if session is None else session
if global_step is None:
with self.graph.as_default():
self.global_step = tf.train.get_or_create_global_step()
self.metric = validate_func(metric)
# Estimator Graph Branches
self._train = GraphBranch()
self._pred = GraphBranch()
self._transform = GraphBranch()
self._eval = GraphBranch()
# Tensor Utils
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.log_dir = log_dir
self._is_initialized = False
self._to_be_restored = False
# Ops
self.train_op = None
self.loss_op = None
# -----------------
# Initializations
# -----------------
def _init_graph(self):
# Initialize all weights
if not self._is_initialized:
self.saver = tf.train.Saver()
init_vars = tf.group(tf.global_variables_initializer(),
resources.initialize_resources(
resources.shared_resources()))
self.session.run(init_vars)
self._is_initialized = True
# Restore weights if needed
if self._to_be_restored:
self.saver = tf.train.Saver()
self.saver.restore(self.session, self._to_be_restored)
self._to_be_restored = False
def _init_estimator(self):
raise NotImplementedError
# ----------------------
# Build Graph Branches
# ----------------------
def _build_fit(self, X, Y, batch_size, multi_inputs=False):
if not self._train._is_ready:
self._init_graph()
train_params = {'X': X, 'Y': Y, 'batch_size': batch_size,
'multi_inputs': multi_inputs}
self._train.build(None, None, train_params)
def _build_pred(self, input_tensor, output_tensor):
self._pred.build(input_tensor, output_tensor)
def _build_transform(self, input_tensor, output_tensor):
self._transform.build(input_tensor, output_tensor)
def _build_eval(self, X, Y, metric, batch_size, multi_inputs=False):
eval_params = {'X': X, 'Y': Y, 'batch_size': batch_size,
'metric': metric, 'multi_inputs': multi_inputs}
self._eval.build(None, None, eval_params)
# ---------
# Methods
# ---------
def fit(self, *args):
#TODO: Handle multiple fits
raise NotImplementedError
def predict(self, *args):
raise NotImplementedError
def evaluate(self, *args):
raise NotImplementedError
def load(self, *args):
raise NotImplementedError
def save(self, *args):
raise NotImplementedError
class SupervisedEstimator(BaseEstimator):
def __init__(self, metric=None, log_dir='/tmp/tflearn_logs/',
global_step=None, session=None, graph=None, name=None):
super(SupervisedEstimator, self).__init__(
metric=metric, log_dir=log_dir, global_step=global_step,
session=session, graph=graph, name=name)
def fit(self, X, Y, *args):
pass
| [
"yibolin@utexas.edu"
] | yibolin@utexas.edu |
bb5630d590dfe6c1e987d8698e11bff0633d156d | 7a803cd0c16ff676e3d7ecc33ec5e7af2c42d026 | /hello.py | f3123c66e088b6ec25c1b96a658855cae387ee88 | [] | no_license | emetowinner/VGG-Internship-Assignments- | ddc798da4c91572455d4f69b0a0524def13be268 | 67fa5b345b0981dd43694b72d5fc61f45d431c19 | refs/heads/master | 2020-12-15T03:40:37.321894 | 2020-03-05T00:46:39 | 2020-03-05T00:46:39 | 234,981,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #Learning Integer Literals
birth_month = input('Enter birth month: ')
if type(birth_month) == str:
print('Not an integer')
if type(birth_month) != int:
print('Converting to int now.........')
int(birth_month)
print('....................')
print('Now is of int type') | [
"emetowinner@gmail.com"
] | emetowinner@gmail.com |
6230181b39936aa1fefe1fa09f2dd52d16319752 | ed5c909afab4715d2b5f6af3c485511b8e2da03e | /venv/bin/pip3 | 4c7a8667f9ab812e075ebe9045394289356ff54e | [] | no_license | Jashwanth-Gowda-R/django_polls_app | 3b8b4761f051bc214e453938a0dfe1d8914ad74f | 9d9d3323abc4bc688ad350ab1cf65b019e8ffc37 | refs/heads/master | 2022-12-27T14:03:08.501097 | 2020-10-19T21:18:31 | 2020-10-19T21:18:31 | 303,642,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | #!/Users/jashwanth/PycharmProjects/djangoProjects/FirstProject/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"jashwanth.go@gmail.com"
] | jashwanth.go@gmail.com | |
c2fedc67cd4a6be00f098dfd061d67b0bab3d711 | fedabeac2d3460c6a0c8a6ddaf5ed50ebc947ffb | /tests/test_get_method.py | a319d001792e48ddb15f59d14eb7231684d6594e | [] | no_license | dnovichkov/FiddlerSessionReplay | cdda25a043e943c22b5fa74e96c751c425a69635 | 46a05290bdd1c58376611870592aea51e5d2c765 | refs/heads/master | 2023-05-27T09:39:38.746714 | 2023-04-18T06:49:12 | 2023-04-18T06:49:12 | 266,951,666 | 0 | 0 | null | 2023-05-22T23:28:51 | 2020-05-26T05:22:46 | Python | UTF-8 | Python | false | false | 371 | py | from fiddler_session_replay.data_extracters import get_method
def test_get_method():
method = get_method('GET url POST')
assert method == 'GET'
method = get_method('GET url ver')
assert method == 'GET'
method = get_method('POST smth else')
assert method == 'POST'
method = get_method('Some text without method')
assert method is None
| [
"dmitriy.novichkov@gmail.com"
] | dmitriy.novichkov@gmail.com |
373176a61e53376daeb20dc28693612b50a51e2b | cbafa8e6b9fb98beb86effbbf68fd06a8951ad2d | /html_downloader.py | 2ead61d328f3574c3b8ea147cd93ee841f7c94c6 | [] | no_license | ym754870370/Python-crawler | f6550e3ded4181c35ee59eb727464f8c0b2cfc7d | 6b9553b02534e96d30a1320df4babf1cfb365346 | refs/heads/master | 2021-01-13T12:34:29.851447 | 2017-01-15T09:36:57 | 2017-01-15T09:36:57 | 72,544,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | #coding: utf8
import urllib2
class HtmlDownloader(object):
def download(self, url):
if url is None:
return None
response = urllib2.urlopen(url)
if response.getcode() !== 200
return None
return response.read()
| [
"wawa754870370@qq.com"
] | wawa754870370@qq.com |
e6549a914956babd4616fd1c41f0ff0a74a2d029 | 6d8ba0ccf79911b91b5288aae4a3e26ccc4c8ba9 | /ui/uartsearch_ui.py | ab5d3dbcb11ef6613270ccecae0e0e104a08db33 | [] | no_license | McHummer1/qt-com-term-sample | 94ef6b21a870f20a24d38dd26a7392048c037a49 | 46ac62e0770ce48d23105fb939a6728293b47bcb | refs/heads/master | 2021-01-03T10:42:54.047541 | 2020-02-12T15:25:50 | 2020-02-12T15:25:50 | 240,045,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,349 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'uartsearch.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 200)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(400, 100))
MainWindow.setMaximumSize(QtCore.QSize(1200, 300))
MainWindow.setDocumentMode(False)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth())
self.comboBox.setSizePolicy(sizePolicy)
self.comboBox.setObjectName("comboBox")
self.horizontalLayout.addWidget(self.comboBox)
self.connectButton = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.connectButton.sizePolicy().hasHeightForWidth())
self.connectButton.setSizePolicy(sizePolicy)
self.connectButton.setObjectName("connectButton")
self.horizontalLayout.addWidget(self.connectButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.scanButton = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scanButton.sizePolicy().hasHeightForWidth())
self.scanButton.setSizePolicy(sizePolicy)
self.scanButton.setObjectName("scanButton")
self.verticalLayout.addWidget(self.scanButton)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Choose port"))
self.connectButton.setText(_translate("MainWindow", "Connect"))
self.scanButton.setText(_translate("MainWindow", "Scan"))
| [
"noreply@github.com"
] | McHummer1.noreply@github.com |
f41efd65e95eea56a6234df8008ffccd64907272 | 49763096a59300e8b796a8873e3166c711db6e5a | /covid/bin/pip3 | 6f6b0c94401bbdbd45fb00e61ba98e731b8b927e | [] | no_license | sulik011/tg_bot_covid | 1fab4b59392fe3231a71216494e81a8d87defcd3 | 1b505086c12a40eebe5ec1fe8e0f996f730b73bc | refs/heads/master | 2022-12-08T12:48:05.714161 | 2020-09-05T08:29:48 | 2020-09-05T08:29:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | #!/home/sultan/tg_bot/covid/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"myrzakanovsultan0@gmail.com"
] | myrzakanovsultan0@gmail.com | |
b24b30c78b67e40110375021b6898570d97b0cf0 | aa32e247ee53706de91bf0c3aa98cc7af5c07c48 | /WPlusJets/8TeV/WJetsAnalysis/TreeMaker/NTupleAnalyzer.py | 6bc2c08e61284f5d6d7f27713a1313a79857934a | [] | no_license | darinbaumgartel/usercode | ddff50ce21fc3a40519da1883e337dd41e2ef07c | 730dff45371d6695446146b454d7b82ae7c49e53 | refs/heads/master | 2021-01-02T23:07:15.163446 | 2013-10-15T15:49:08 | 2013-10-15T15:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,278 | py | #!/usr/bin/python
from datetime import datetime
import sys
sys.argv.append( '-b True' )
from ROOT import *
import array
import math
from optparse import OptionParser
startTime = datetime.now()
tRand = TRandom3()
from random import randint
import os
##########################################################################################
################# SETUP OPTIONS - File, Normalization, etc #######################
##########################################################################################
# Input Options - file, cross-section, number of vevents
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", help="input root file", metavar="FILE")
parser.add_option("-b", "--batch", dest="dobatch", help="run in batch mode", metavar="BATCH")
parser.add_option("-s", "--sigma", dest="crosssection", help="specify the process cross-section", metavar="SIGMA")
parser.add_option("-n", "--ntotal", dest="ntotal", help="total number of MC events for the sample", metavar="NTOTAL")
parser.add_option("-l", "--lumi", dest="lumi", help="integrated luminosity for data taking", metavar="LUMI")
parser.add_option("-j", "--json", dest="json", help="json file for certified run:lumis", metavar="JSON")
parser.add_option("-d", "--dir", dest="dir", help="output directory", metavar="DIR")
parser.add_option("-p", "--pdf", dest="pdf", help="option to produce pdf uncertainties", metavar="PDF")
(options, args) = parser.parse_args()
dopdf = int(options.pdf)==1
# Here we get the file name, and adjust it accordingly for EOS, castor, or local directory
name = options.filename
if '/store' in name:
name = 'root://eoscms//eos/cms'+name
if '/castor/cern.ch' in name:
name = 'rfio://'+name
# Typical event weight, sigma*lumi/Ngenerated
startingweight = float(options.crosssection)*float(options.lumi)/float(options.ntotal)
# Get the file, tree, and number of entries
print name
fin = TFile.Open(name,"READ")
to = fin.Get("rootTupleTree/tree")
No = to.GetEntries()
indicator = ((name.split('_'))[-1]).replace('.root','')
junkfile = str(randint(100000000,1000000000))+indicator+'junk.root'
fj2 = TFile.Open(junkfile,'RECREATE')
cuts = '(1)'
t = to.CopyTree()
N = t.GetEntries()
print N, 'Events to Process'
##########################################################################################
################# PREPARE THE VARIABLES FOR THE OUTPUT TREE #######################
##########################################################################################
# Branches will be created as follows: One branch for each kinematic variable for each
# systematic variation determined in _variations. One branch for each weight and flag.
# So branch names will include weight_central, run_number, Pt_muon1, Pt_muon1MESUP, etc.
_kinematicvariables = ['Pt_muon1','Pt_muon2','Pt_ele1','Pt_ele2','Pt_jet1','Pt_jet2','Pt_jet3','Pt_jet4','Pt_jet5','Pt_miss']
_kinematicvariables += ['Eta_muon1','Eta_muon2','Eta_ele1','Eta_ele2','Eta_jet1','Eta_jet2','Eta_jet3','Eta_jet4','Eta_jet5','Eta_miss']
_kinematicvariables += ['Phi_muon1','Phi_muon2','Phi_ele1','Phi_ele2','Phi_jet1','Phi_jet2','Phi_jet3','Phi_jet4','Phi_jet5','Phi_miss']
_kinematicvariables += ['Dphi_muon1jet1','Dphi_muon1jet2','Dphi_muon1jet3','Dphi_muon1jet4','Dphi_muon1jet5']
_kinematicvariables += ['Dphi_ele1jet1','Dphi_ele1jet2','Dphi_ele1jet3','Dphi_ele1jet4','Dphi_ele1jet5']
_kinematicvariables += ['MT_ev','MT_uv','HT_jets']
_kinematicvariables += ['JetCount','MuonCount','EleCount']
_kinematicvariables_gen = [x+'_gen' for x in _kinematicvariables]
_kinematicvariables_genbare = [x+'_genbare' for x in _kinematicvariables]
_kinematicvariables += _kinematicvariables_gen
_kinematicvariables += _kinematicvariables_genbare
_weights = ['weight_nopu','weight_central', 'weight_pu_up', 'weight_pu_down']
_flags = ['run_number','event_number','lumi_number','GoodVertexCount']
_flags += ['passPrimaryVertex','passBeamScraping','passHBHENoiseFilter','passBPTX0','passBeamHalo','passTrackingFailure','passTriggerObjectMatching','passDataCert']
_flags += ['passBadEESuperCrystal','passEcalDeadCellBE','passEcalDeadCellTP','passEcalLaserCorr','passHcalLaserEvent','passPhysDeclared']
_flags += ['passTrigger_IsoMuon','prescaleTrigger_IsoMuon','passTrigger_NonIsoMuon','prescaleTrigger_NonIsoMuon']
_flags += ['passTrigger_IsoEle','prescaleTrigger_IsoEle','passTrigger_NonIsoEle','prescaleTrigger_NonIsoEle']
_variations = ['','JESup','JESdown','MESup','MESdown','JERup','JERdown','MER']
if nonisoswitch==True or emuswitch==True or quicktestswitch==True:
print 'NOT performing systematics...'
_variations = [''] # For quicker tests
# _variations = [''] # For quicker tests
##########################################################################################
################# Deal with weights including PDF and Pileup #######################
##########################################################################################
def GetPURescalingFactors(puversion):
# Purpose: To get the pileup reweight factors from the PU_Central.root, PU_Up.root, and PU_Down.root files.
# The MC Truth distribution is taken from https://twiki.cern.ch/twiki/bin/view/CMS/PileupMCReweightingUtilities
MCDistSummer12 = [2.560E-06, 5.239E-06, 1.420E-05, 5.005E-05, 1.001E-04, 2.705E-04, 1.999E-03, 6.097E-03, 1.046E-02, 1.383E-02,
1.685E-02, 2.055E-02, 2.572E-02, 3.262E-02, 4.121E-02, 4.977E-02, 5.539E-02, 5.725E-02, 5.607E-02, 5.312E-02, 5.008E-02, 4.763E-02,
4.558E-02, 4.363E-02, 4.159E-02, 3.933E-02, 3.681E-02, 3.406E-02, 3.116E-02, 2.818E-02, 2.519E-02, 2.226E-02, 1.946E-02, 1.682E-02,
1.437E-02, 1.215E-02, 1.016E-02, 8.400E-03, 6.873E-03, 5.564E-03, 4.457E-03, 3.533E-03, 2.772E-03, 2.154E-03, 1.656E-03, 1.261E-03,
9.513E-04, 7.107E-04, 5.259E-04, 3.856E-04, 2.801E-04, 2.017E-04, 1.439E-04, 1.017E-04, 7.126E-05, 4.948E-05, 3.405E-05, 2.322E-05,
1.570E-05, 5.005E-06]
if puversion =='Basic':
h_pu_up = TFile.Open("PU_Up.root",'read').Get('pileup')
h_pu_down = TFile.Open("PU_Down.root",'read').Get('pileup')
h_pu_central = TFile.Open("PU_Central.root",'read').Get('pileup')
if puversion =='2012D':
h_pu_up = TFile.Open("PU_Up_2012D.root",'read').Get('pileup')
h_pu_down = TFile.Open("PU_Down_2012D.root",'read').Get('pileup')
h_pu_central = TFile.Open("PU_Central_2012D.root",'read').Get('pileup')
bins_pu_central = []
bins_pu_up = []
bins_pu_down = []
for x in range(h_pu_up.GetNbinsX()):
bin = x +1
bins_pu_central.append(h_pu_central.GetBinContent(bin))
bins_pu_up.append(h_pu_up.GetBinContent(bin))
bins_pu_down.append(h_pu_down.GetBinContent(bin))
total_pu_central = sum(bins_pu_central)
total_pu_up = sum(bins_pu_up)
total_pu_down = sum(bins_pu_down)
total_mc = sum(MCDistSummer12)
bins_pu_central_norm = [x/total_pu_central for x in bins_pu_central]
bins_pu_up_norm = [x/total_pu_up for x in bins_pu_up]
bins_pu_down_norm = [x/total_pu_down for x in bins_pu_down]
bins_mc_norm = [x/total_mc for x in MCDistSummer12]
scale_pu_central = []
scale_pu_up = []
scale_pu_down = []
for x in range(len(bins_mc_norm)):
scale_pu_central.append(bins_pu_central_norm[x]/bins_mc_norm[x])
scale_pu_up.append(bins_pu_up_norm[x]/bins_mc_norm[x])
scale_pu_down.append(bins_pu_down_norm[x]/bins_mc_norm[x])
return [scale_pu_central, scale_pu_up, scale_pu_down]
# Use the above function to get the pu weights
[CentralWeights,UpperWeights,LowerWeights] =GetPURescalingFactors('Basic')
[CentralWeights_2012D,UpperWeights_2012D,LowerWeights_2012D] =GetPURescalingFactors('2012D')
print ' ** ',name
indicator = ((name.split('_'))[-1]).replace('.root','')
tmpfout = str(randint(100000000,1000000000))+indicator+'.root'
# finalfout = options.dir+'/'+name.split('/')[-1].replace('.root','_tree.root')
finalfout = options.dir+'/'+(name.split('/')[-2]+'__'+name.split('/')[-1].replace('.root','_tree.root'))
# Create the output file and tree "PhysicalVariables"
fout = TFile.Open(tmpfout,"RECREATE")
tout=TTree("PhysicalVariables","PhysicalVariables")
def GetPDFWeightVars(T):
# Purpose: Determine all the branch names needed to store the PDFWeights
# for CTEQ, MSTW, and NNPDF in flat (non vector) form.
if T.isData:
return []
else:
T.GetEntry(1)
pdfweights=[]
for x in range(len(T.PDFCTEQWeights)):
pdfweights.append('factor_cteq_'+str(x+1))
for x in range(len(T.PDFMSTWWeights)):
pdfweights.append('factor_mstw_'+str(x+1))
for x in range(len(T.PDFNNPDFWeights)):
pdfweights.append('factor_nnpdf_'+str(x+1))
return pdfweights
# Use the above function to get the pdfweights
_pdfweights = GetPDFWeightVars(t)
# _pdfLHS will store the lefthand side of an equation to cast all pdfweights
# into their appropriate branches
_pdfLHS = '['
# Below all the branches are created, everything is a double except for flags
for b in _kinematicvariables:
for v in _variations:
exec(b+v+' = array.array("f",[0])')
exec('tout.Branch("'+b+v+'",'+b+v+',"'+b+v+'/F")' )
for b in _weights:
exec(b+' = array.array("f",[0])')
exec('tout.Branch("'+b+'",'+b+',"'+b+'/F")' )
if dopdf:
for b in _pdfweights:
exec(b+' = array.array("f",[0])')
exec('tout.Branch("'+b+'",'+b+',"'+b+'/F")' )
_pdfLHS += (b+'[0],')
for b in _flags:
exec(b+' = array.array("L",[0])')
exec('tout.Branch("'+b+'",'+b+',"'+b+'/i")' )
_pdfLHS +=']'
_pdfLHS = _pdfLHS.replace(',]',']')
##########################################################################################
################# SPECIAL FUNCTIONS FOR ANALYSIS #######################
##########################################################################################
def PrintBranchesAndExit(T):
# Purpose: Just list the branches on the input file and bail out.
# For coding and debugging
x = T.GetListOfBranches()
for n in x:
print n
sys.exit()
# PrintBranchesAndExit(t)
def GetRunLumiList():
# print options.json
jfile = open(options.json,'r')
flatjson = ''
for line in jfile:
flatjson+=line.replace('\n','')
flatjson = flatjson.replace("}","")
flatjson = flatjson.replace("{","")
flatjson = flatjson.replace(":","")
flatjson = flatjson.replace(" ","")
flatjson = flatjson.replace("\t","")
jinfo = flatjson.split('"')
strjson = ''
for j in jinfo:
strjson += j
strjson = strjson.replace('\n[',' [')
strjson = strjson.replace(']],',']]\n')
strjson = strjson.replace('[[',' [[')
pairs = []
for line in strjson.split('\n'):
pair = []
line = line.split(' ')
exec('arun = '+line[0])
exec('alumis = '+line[1])
verboselumis = []
for r in alumis:
verboselumis += range(r[0],r[1]+1)
pair.append(arun)
pair.append(verboselumis)
pairs.append(pair)
return pairs
GoodRunLumis = GetRunLumiList()
def CheckRunLumiCert(r,l):
for _rl in GoodRunLumis:
if _rl[0]==r:
for _l in _rl[1]:
if _l == l:
return True
return False
def GeomFilterCollection(collection_to_clean,good_collection,dRcut):
# Purpose: Take a collection of TLorentzVectors that you want to clean (arg 1)
# by removing all objects within dR of dRcut (arg 3) of any element in
# the collection of other particles (arg 2)
# e.g. argumments (jets,muons,0.3) gets rid of jets within 0.3 of muons.
output_collection = []
for c in collection_to_clean:
isgood = True
for g in good_collection:
if (c.DeltaR(g))<dRcut:
isgood = False
if isgood==True:
output_collection.append(c)
return output_collection
def TransMass(p1,p2):
# Purpose: Simple calculation of transverse mass between two TLorentzVectors
return math.sqrt( 2*p1.Pt()*p2.Pt()*(1-math.cos(p1.DeltaPhi(p2))) )
def InvMass(particles):
# Purpose: Simple calculation of invariant mass between two TLorentzVectors
output=particles
return (p1+p2).M()
def ST(particles):
# Purpose: Calculation of the scalar sum of PT of a set of TLorentzVectors
st = 0.0
for p in particles:
st += p.Pt()
return st
def PassTrigger(T,trigger_identifiers,prescale_threshold):
# Purpose: Return a flag (1 or 0) to indicate whether the event passes any trigger
# which is syntactically matched to a set of strings trigger_identifiers,
# considering only triggers with a prescale <= the prescale threshold.
for n in range(len(T.HLTInsideDatasetTriggerNames)):
name = T.HLTInsideDatasetTriggerNames[n]
consider_trigger=True
for ident in trigger_identifiers:
if ident not in name:
consider_trigger=False
if (consider_trigger==False) : continue
prescale = T.HLTInsideDatasetTriggerPrescales[n]
if prescale > prescale_threshold:
consider_trigger=False
if (consider_trigger==False) : continue
decision = T.HLTInsideDatasetTriggerDecisions[n]
if decision==True:
return 1
return 0
def CountVertices(T):
	# Purpose: Count good reconstructed primary vertices: not fake,
	#          NDF > 4, |z| <= 24 cm, rho < 2 cm.
	def _is_good(v):
		return ( (T.VertexIsFake[v] != True)
		         and (T.VertexNDF[v] > 4.0)
		         and (abs(T.VertexZ[v]) <= 24.0)
		         and (abs(T.VertexRho[v]) < 2.0) )
	return sum(1 for v in range(len(T.VertexZ)) if _is_good(v))
def GetPUWeight(T,version,puversion):
	# Purpose: Get the pileup weight for an event. `version` selects the central
	#          weight or the Upper/Lower systematic variations; `puversion`
	#          selects the full-dataset ('Basic') or 2012D-only weight tables.
	#          Only meaningful for MC: data always gets weight 1.
	if T.isData:
		return 1.0
	# Getting number of PU interactions, start with zero.
	N_pu = 0
	# Set N_pu to the number of true PU interactions in the in-time bunch
	# crossing (BX == 0).
	# Fix: the original wrote `abs(T.PileUpOriginBX[n]==0)`, i.e. abs() of a
	# boolean, which only selected BX==0 by coincidence; the comparison is now
	# written as intended.
	for n in range(len(T.PileUpInteractionsTrue)):
		if T.PileUpOriginBX[n] == 0:
			N_pu = int(1.0*(T.PileUpInteractionsTrue[n]))
	puweight = 0
	# Assign the list of possible PU weights according to what is being done:
	# central, systematic up, or systematic down.
	if puversion=='Basic':
		puweights = CentralWeights
		if version=='SysUp':
			puweights=UpperWeights
		if version=='SysDown':
			puweights=LowerWeights
	# Also possible to do just for 2012D, for cross-checks.
	if puversion=='2012D':
		puweights = CentralWeights_2012D
		if version=='SysUp':
			puweights=UpperWeights_2012D
		if version=='SysDown':
			puweights=LowerWeights_2012D
	# NOTE(review): an unrecognized puversion leaves `puweights` unbound and
	# raises NameError below - confirm callers only ever pass 'Basic'/'2012D'.
	# Make sure there exists a weight for the number of interactions given,
	# and set the puweight to the appropriate value (0 if out of range).
	NRange = range(len(puweights))
	if N_pu in NRange:
		puweight=puweights[N_pu]
	return puweight
def FillPDFWeights(T):
	# Purpose: Build the exec string "<_pdfLHS> = [w1, w2, ...]" assigning all
	#          PDF reweighting factors (CTEQ + NNPDF + MSTW, in that order) to
	#          the branch container named by the module-level _pdfLHS.
	combined = []
	for weight_collection in (T.PDFCTEQWeights, T.PDFNNPDFWeights, T.PDFMSTWWeights):
		combined += weight_collection
	return (_pdfLHS + ' = ' + str(combined))
def PropagatePTChangeToMET(met,original_object,varied_object):
	# Purpose: Compensate the missing-ET vector for a systematic change to an
	#          object: add the varied object and subtract the original one,
	#          in that order, so the MET balances the kinematic shift.
	shifted = met + varied_object
	shifted = shifted - original_object
	return shifted
def IDMuons(T,met,variation,isdata=None):
	# Purpose: Gets the collection of muons passing tight muon ID.
	#          Returns [muons, indices, met]: muons as TLorentzVectors, the
	#          indices of the surviving muons in the input collection, and the
	#          MET modified for the requested systematic variation.
	# Fix: accept an optional 4th argument (isdata) so the existing call
	# IDMuons(T, met, variation, T.isData) no longer raises TypeError; the
	# flag is currently unused here.
	muons = []
	muoninds = []
	if variation=='MESup':
		# Muon Energy Scale up: +5% per TeV of pT.
		_MuonPt = [pt*(1.0+0.05*0.001*pt) for pt in T.MuonPt]
	elif variation=='MESdown':
		_MuonPt = [pt*(1.0-0.05*0.001*pt) for pt in T.MuonPt]
	elif variation=='MER':
		# Muon Energy Resolution smearing: 1% below 200 GeV, 4% above.
		_MuonPt = [pt+pt*tRand.Gaus(0.0, 0.01*(pt<=200) + 0.04*(pt>200) ) for pt in T.MuonPt]
	else:
		_MuonPt = [pt for pt in T.MuonPt]
	trk_isos = []
	charges = []
	for n in range(len(_MuonPt)):
		# High-pT tight muon ID requirements (Pass stays truthy only if all hold).
		Pass = True
		Pass *= (T.MuonPt[n] > 20)
		Pass *= abs(T.MuonEta[n])<2.1
		Pass *= T.MuonIsGlobal[n]
		Pass *= T.MuonIsPF[n]
		Pass *= T.MuonBestTrackVtxDistXY[n] < 0.2
		Pass *= T.MuonBestTrackVtxDistZ[n] < 0.5
		if nonisoswitch != True:
			# Relative tracker isolation, skipped for non-isolated studies.
			Pass *= (T.MuonTrackerIsoSumPT[n]/_MuonPt[n])<0.1
		# Pass *= T.MuonTrkHitsTrackerOnly[n]>=11 // old !!
		Pass *= T.MuonStationMatches[n]>1
		# Pass *= abs(T.MuonPrimaryVertexDXY[n])<0.2 //old !!
		Pass *= T.MuonGlobalChi2[n]<10.0
		Pass *= T.MuonPixelHits[n]>=1
		Pass *= T.MuonGlobalTrkValidHits[n]>=1
		Pass *= T.MuonTrackLayersWithMeasurement[n] > 5
		if (Pass):
			NewMu = TLorentzVector()
			OldMu = TLorentzVector()
			NewMu.SetPtEtaPhiM(_MuonPt[n],T.MuonEta[n],T.MuonPhi[n],0)
			OldMu.SetPtEtaPhiM(T.MuonPt[n],T.MuonEta[n],T.MuonPhi[n],0)
			# Propagate the pT shift of every ID'd muon into the MET.
			met = PropagatePTChangeToMET(met,OldMu,NewMu)
			# Final (possibly varied) pT threshold.
			Pass *= (_MuonPt[n] > 35)
		if (Pass):
			muons.append(NewMu)
			trk_isos.append((T.MuonTrackerIsoSumPT[n]/_MuonPt[n]))
			charges.append(T.MuonCharge[n])
			muoninds.append(n)
			# (removed: unused local deltainvpt = 1/MuonTrkPt - 1/MuonPt)
	return [muons,muoninds,met]
def IDElectrons(T,met,variation):
	# Purpose: Gets the collection of electrons passing HEEP ID.
	#          Returns electrons as TLorentzVectors, and indices corrresponding
	#          to the surviving electrons of the electron collection.
	#          Also returns modified MET for systematic variations.
	electrons = []
	electroninds = []
	# Electron Energy Scale (+-1%) / Resolution (4% Gaussian smear) variations
	# applied to the HEEP pT collection.
	if variation=='EESup':
		_ElectronPt = [pt*1.01 for pt in T.ElectronPtHeep]
	elif variation=='EESdown':
		_ElectronPt = [pt*0.99 for pt in T.ElectronPtHeep]
	elif variation=='EER':
		_ElectronPt = [pt+pt*tRand.Gaus(0.0,0.04) for pt in T.ElectronPtHeep]
	else:
		_ElectronPt = [pt for pt in T.ElectronPtHeep]
	for n in range(len(_ElectronPt)):
		# HEEP-style selection: Pass stays truthy only if every cut holds.
		Pass = True
		Pass *= (T.ElectronPtHeep[n] > 35)
		Pass *= abs(T.ElectronEta[n])<2.1
		# Barrel/endcap split excludes the ECAL transition region 1.442-1.56.
		barrel = (abs(T.ElectronEta[n]))<1.442
		endcap = (abs(T.ElectronEta[n]))>1.56
		Pass *= (barrel+endcap)
		if barrel:
			# Barrel HEEP requirements (shower shape, track match, isolation).
			Pass *= T.ElectronHasEcalDrivenSeed[n]
			Pass *= T.ElectronDeltaEtaTrkSC[n] < 0.005
			Pass *= T.ElectronDeltaPhiTrkSC[n] < 0.06
			Pass *= T.ElectronHoE[n] < 0.05
			Pass *= ((T.ElectronE2x5OverE5x5[n] > 0.94) or (T.ElectronE1x5OverE5x5[n] > 0.83) )
			# HCAL isolation with pT- and pileup(rho)-dependent slope.
			Pass *= T.ElectronHcalIsoD1DR03[n] < (2.0 + 0.03*_ElectronPt[n] + 0.28*T.rhoForHEEP)
			Pass *= T.ElectronTrkIsoDR03[n] < 5.0
			Pass *= T.ElectronMissingHits[n] <=1
			Pass *= T.ElectronLeadVtxDistXY[n]<0.02
		if endcap:
			# Endcap HEEP requirements (looser track match, sigma-ieta-ieta).
			Pass *= T.ElectronHasEcalDrivenSeed[n]
			Pass *= T.ElectronDeltaEtaTrkSC[n] < 0.007
			Pass *= T.ElectronDeltaPhiTrkSC[n] < 0.06
			Pass *= T.ElectronHoE[n] < 0.05
			Pass *= T.ElectronSigmaIEtaIEta[n] < 0.03
			# HCAL isolation: flat below 50 GeV, pT-sloped above.
			if _ElectronPt[n]<50:
				Pass *= (T.ElectronHcalIsoD1DR03[n] < (2.5 + 0.28*T.rhoForHEEP))
			else:
				Pass *= (T.ElectronHcalIsoD1DR03[n] < (2.5 + 0.03*(_ElectronPt[n]-50.0) + 0.28*T.rhoForHEEP))
			Pass *= T.ElectronTrkIsoDR03[n] < 5.0
			Pass *= T.ElectronMissingHits[n] <=1
			Pass *= T.ElectronLeadVtxDistXY[n]<0.05
		if (Pass):
			# Build old/new four-vectors and fold the pT shift into the MET.
			NewEl = TLorentzVector()
			OldEl = TLorentzVector()
			NewEl.SetPtEtaPhiM(_ElectronPt[n],T.ElectronEta[n],T.ElectronPhi[n],0)
			OldEl.SetPtEtaPhiM(T.ElectronPtHeep[n],T.ElectronEta[n],T.ElectronPhi[n],0)
			met = PropagatePTChangeToMET(met,OldEl,NewEl)
			# Final threshold on the (possibly varied) pT.
			Pass *= (_ElectronPt[n] > 35)
		if (Pass):
			electrons.append(NewEl)
			electroninds.append(n)
	return [electrons,electroninds,met]
def JERModifiedPt(pt,eta,phi,T,modtype):
	# Purpose: Modify reco jets based on genjets. Input is pt/eta/phi of a jet.
	#          The jet will be matched to a gen jet, and the difference
	#          between reco and gen will be modified according to appropriate
	#          pt/eta dependent scale factors.
	#          The modified jet PT is returned.
	# https://hypernews.cern.ch/HyperNews/CMS/get/JetMET/1336.html
	# https://twiki.cern.ch/twiki/bin/view/CMS/JetResolution
	bestn = -1  # NOTE(review): assigned but never used afterwards
	bestdpt = 0
	bestdR = 9999999.9
	jet = TLorentzVector()
	jet.SetPtEtaPhiM(pt,eta,phi,0.0)
	# Find the closest gen jet within dR < 0.3 of the reco jet.
	for n in range(len(T.GenJetPt)):
		gjet = TLorentzVector()
		gjet.SetPtEtaPhiM(T.GenJetPt[n],T.GenJetEta[n],T.GenJetPhi[n],0.0)
		dR = abs(jet.DeltaR(gjet))
		if dR<bestdR and dR<0.3 :
			bestdR = dR
			bestn = n
			bestdpt = pt-gjet.Pt()
	# No gen match found: return the pT untouched. (bestdR is either < 0.3 or
	# still the 9999999.9 sentinel here, so this check only catches "no match".)
	if bestdR>0.5:
		return pt
	# Eta-binned smearing factors: [central, up, down]; the cascading ifs
	# deliberately let later (higher-|eta|) bins overwrite earlier ones.
	abseta = abs(eta)
	if abseta >= 0 : jfacs = [ 0.05200 , 0.11515 , -0.00900 ]
	if abseta >= 0.5 : jfacs = [ 0.05700 , 0.11427 , 0.00200 ]
	if abseta >= 1.1 : jfacs = [ 0.09600 , 0.16125 , 0.03400 ]
	if abseta >= 1.7 : jfacs = [ 0.13400 , 0.22778 , 0.04900 ]
	if abseta >= 2.3 : jfacs = [ 0.28800 , 0.48838 , 0.13500 ]
	# NOTE(review): a modtype other than ''/'up'/'down' leaves adjustmentfactor
	# unbound and raises NameError below - confirm callers never do that.
	if modtype == '':
		adjustmentfactor = jfacs[0]
	if modtype == 'up':
		adjustmentfactor = jfacs[1]
	if modtype == 'down':
		adjustmentfactor = jfacs[2]
	# Scale the reco-gen pT difference and apply it to the jet pT.
	ptadjustment = adjustmentfactor*bestdpt
	pt += ptadjustment
	return pt
def LooseIDJets(T,met,variation,isdata):
	# Purpose: Gets the collection of jets passing loose PFJet ID.
	#          Returns [jets, indices, met, fail-threshold, NHF, NEMF]:
	#          surviving jets as TLorentzVectors, their indices in the jet
	#          collection, the MET adjusted for the variation, the highest pT
	#          of any in-acceptance jet failing the ID, and the neutral-hadron
	#          and neutral-EM energy fractions of the kept jets.
	# Choose the jet pT collection for the requested systematic variation.
	if variation!='JERup' and variation!='JERdown':
		_PFJetPt = [pt for pt in T.PFJetPt]
	if variation=='JERup':
		_PFJetPt = [JERModifiedPt(T.PFJetPt[n],T.PFJetEta[n],T.PFJetPhi[n],T,'up') for n in range(len(T.PFJetPt))]
	if variation=='JERdown':
		_PFJetPt = [JERModifiedPt(T.PFJetPt[n],T.PFJetEta[n],T.PFJetPhi[n],T,'down') for n in range(len(T.PFJetPt))]
	if variation=='JESup':
		_PFJetPt = [ _PFJetPt[n]*(1.0+T.PFJetJECUnc[n]) for n in range(len(_PFJetPt))]
	if variation=='JESdown':
		_PFJetPt = [ _PFJetPt[n]*(1.0-T.PFJetJECUnc[n]) for n in range(len(_PFJetPt))]
	if (isdata):
		# Data gets neither smearing nor scale shifts.
		_PFJetPt = [pt for pt in T.PFJetPt]
	JetFailThreshold = 0.0
	jets = []
	jetinds = []
	NHF = []
	NEMF = []
	for n in range(len(_PFJetPt)):
		# Kinematic acceptance: pT > 30 GeV inside the tracker (|eta| < 2.4).
		if not (_PFJetPt[n]>30 and abs(T.PFJetEta[n])<2.4):
			continue
		if T.PFJetPassLooseID[n]==1:
			newjet = TLorentzVector()
			newjet.SetPtEtaPhiM(_PFJetPt[n],T.PFJetEta[n],T.PFJetPhi[n],0)
			rawjet = TLorentzVector()
			rawjet.SetPtEtaPhiM(T.PFJetPt[n],T.PFJetEta[n],T.PFJetPhi[n],0)
			# Fold the pT shift of every kept jet into the MET.
			met = PropagatePTChangeToMET(met,rawjet,newjet)
			jets.append(newjet)
			jetinds.append(n)
			NHF.append(T.PFJetNeutralHadronEnergyFraction[n])
			NEMF.append(T.PFJetNeutralEmEnergyFraction[n])
		else:
			# Track the hardest in-acceptance jet that failed the loose ID.
			JetFailThreshold = max(JetFailThreshold, _PFJetPt[n])
	return [jets,jetinds,met,JetFailThreshold,NHF,NEMF]
def MetVector(T):
	# Purpose: Build a TLorentzVector for the type-0+1 XY-corrected PFMET.
	#          MET has no real pseudorapidity, so eta and mass are set to 0.
	missing_et = TLorentzVector()
	missing_et.SetPtEtaPhiM(T.PFMETType01XYCor[0],0,T.PFMETPhiType01XYCor[0],0)
	return missing_et
##########################################################################################
########### FULL CALCULATION OF ALL VARIABLES, REPEATED FOR EACH SYS ##############
##########################################################################################
def FullKinematicCalculation(T,variation):
	# Purpose: This is the magic function which calculates all kinmatic quantities using
	#          the previous functions. It returns them as a simple list of doubles.
	#          It will be used in the loop over events. The 'variation' argument is passed
	#          along when getting the sets of leptons and jets, so the kinematics will vary.
	#          This function is repeated for all the sytematic variations inside the event
	#          loop. The return arguments ABSOLUELY MUST be in the same order they are
	#          listed in the branch declarations. Modify with caution.
	# MET as a vector
	met = MetVector(T)
	# ID Muons,Electrons
	# Fix: IDMuons takes (T, met, variation); the extra T.isData argument the
	# original passed made every call raise TypeError.
	[muons,goodmuoninds,met] = IDMuons(T,met,variation)
	[electrons,electroninds,met] = IDElectrons(T,met,variation)
	# ID Jets and filter from muons
	[jets,jetinds,met,failthreshold,neutralhadronEF,neutralemEF] = LooseIDJets(T,met,variation,T.isData)
	jets = GeomFilterCollection(jets,muons,0.5)
	jets = GeomFilterCollection(jets,electrons,0.5)
	# Empty lorenz vector for bookkeeping
	EmptyLorentz = TLorentzVector()
	EmptyLorentz.SetPtEtaPhiM(.00001,0,0,0)
	# Muon and Jet Counts
	_mucount = len(muons)
	_elcount = len(electrons)
	_jetcount = len(jets)
	# Make sure there are two of each lepton type, even if zero.
	if len(muons) < 1 : muons.append(EmptyLorentz)
	if len(muons) < 2 : muons.append(EmptyLorentz)
	if len(electrons) < 1 : electrons.append(EmptyLorentz)
	if len(electrons) < 2 : electrons.append(EmptyLorentz)
	# Make sure there are 5 jet objects
	if len(jets) < 1 : jets.append(EmptyLorentz)
	if len(jets) < 2 : jets.append(EmptyLorentz)
	if len(jets) < 3 : jets.append(EmptyLorentz)
	if len(jets) < 4 : jets.append(EmptyLorentz)
	if len(jets) < 5 : jets.append(EmptyLorentz)
	# Get kinmetic quantities
	[_ptmu1,_etamu1,_phimu1] = [muons[0].Pt(),muons[0].Eta(),muons[0].Phi()]
	[_ptmu2,_etamu2,_phimu2] = [muons[1].Pt(),muons[1].Eta(),muons[1].Phi()]
	[_ptel1,_etael1,_phiel1] = [electrons[0].Pt(),electrons[0].Eta(),electrons[0].Phi()]
	[_ptel2,_etael2,_phiel2] = [electrons[1].Pt(),electrons[1].Eta(),electrons[1].Phi()]
	[_ptj1,_etaj1,_phij1] = [jets[0].Pt(),jets[0].Eta(),jets[0].Phi()]
	[_ptj2,_etaj2,_phij2] = [jets[1].Pt(),jets[1].Eta(),jets[1].Phi()]
	[_ptj3,_etaj3,_phij3] = [jets[2].Pt(),jets[2].Eta(),jets[2].Phi()]
	[_ptj4,_etaj4,_phij4] = [jets[3].Pt(),jets[3].Eta(),jets[3].Phi()]
	[_ptj5,_etaj5,_phij5] = [jets[4].Pt(),jets[4].Eta(),jets[4].Phi()]
	[_ptmet,_etamet,_phimet] = [met.Pt(),0,met.Phi()]
	_htjets = ST(jets)
	# Get DPhi Variables
	# Fix: the original two lines had unbalanced brackets and iterated over a
	# one-element list containing the lepton list itself, always against jets[0].
	# The intent is the azimuthal gap between the leading lepton and each of
	# the five leading jets.
	[_DPhiu1j1,_DPhiu1j2,_DPhiu1j3,_DPhiu1j4,_DPhiu1j5] = [abs(muons[0].DeltaPhi(jets[jn])) for jn in range(5)]
	[_DPhie1j1,_DPhie1j2,_DPhie1j3,_DPhie1j4,_DPhie1j5] = [abs(electrons[0].DeltaPhi(jets[jn])) for jn in range(5)]
	# Get MT variables:
	# Fix: 'muon1' was undefined; the leading muon is muons[0].
	_mtuv = TransMass(muons[0],met)
	_mtev = TransMass(electrons[0],met)
	# This MUST have the same structure as _kinematicvariables!
	toreturn = []
	toreturn += [_ptmu1,_ptmu2,_ptel1,_ptel2,_ptj1,_ptj2,_ptj3,_ptj4,_ptj5,_ptmet]
	toreturn += [_etamu1,_etamu2,_etael1,_etael2,_etaj1,_etaj2,_etaj3,_etaj4,_etaj5,_etamet]
	toreturn += [_phimu1,_phimu2,_phiel1,_phiel2,_phij1,_phij2,_phij3,_phij4,_phij5,_phimet]
	toreturn += [_DPhiu1j1,_DPhiu1j2,_DPhiu1j3,_DPhiu1j4,_DPhiu1j5]
	toreturn += [_DPhie1j1,_DPhie1j2,_DPhie1j3,_DPhie1j4,_DPhie1j5]
	toreturn += [_mtuv,_mtev,_htjets]
	toreturn += [_jetcount,_mucount,_elcount]
	return toreturn
##########################################################################################
################# BELOW IS THE ACTUAL LOOP OVER ENTRIES #######################
##########################################################################################
# Please don't edit here. It is static. The kinematic calulations are the only thing to edit!
# Bind the lumisection branch to a raw array buffer so it can be read per-event.
lumisection = array.array("L",[0])
t.SetBranchAddress("ls",lumisection)
for n in range(N):
	# This is the loop over events. Due to the heavy use of functions and automation of
	# systematic variations, this loop is very small. It should not really be editted,
	# except possibly to add a new flag or weight variable.
	# All editable contents concerning kinematics are in the function defs.
	# Get the entry
	t.GetEntry(n)
	# if n > 1000: # Testing....
	# break
	if n%1000==0:
		print 'Procesing event',n, 'of', N # where we are in the loop...
	## =========================== BASIC SETUP ============================= ##
	# print '-----'
	# Assign Weights
	weight_central[0] = startingweight*GetPUWeight(t,'Central','Basic')
	weight_pu_down[0] = startingweight*GetPUWeight(t,'SysDown','Basic')
	weight_pu_up[0] = startingweight*GetPUWeight(t,'SysUp','Basic')
	# NOTE(review): every other weight is stored via [0] into its branch
	# buffer; the missing [0] here rebinds the name instead of filling the
	# branch - confirm against the branch declarations upstream.
	weight_central_2012D = startingweight*GetPUWeight(t,'Central','2012D')
	weight_nopu[0] = startingweight
	if dopdf:
		# FillPDFWeights returns an assignment string for the PDF weight branch.
		exec(FillPDFWeights(t))
	# Event Flags
	run_number[0] = t.run
	# event_number[0] = int(t.event)
	event_number[0] = t.event
	lumi_number[0] = lumisection[0]
	GoodVertexCount[0] = CountVertices(t)
	# Data-quality / cleaning flags: real filter results for data, forced to
	# "pass" (1) for MC where the filters do not apply.
	if t.isData == True:
		pass_HLTMu40_eta2p1[0] = PassTrigger(t,["HLT_Mu40_eta2p1_v"],1) # Data Only
		passTriggerObjectMatching[0] = 1*(True in t.MuonHLTSingleMuonMatched) # Data Only
		passBPTX0[0] = 1*(t.isBPTX0) # Unused, Data only: MC = 0
		passBeamScraping[0] = 1*(1-t.isBeamScraping) # Used, Data only
		passTrackingFailure[0] = 1*(1-t.isTrackingFailure) # Used, Data only
		passBadEESuperCrystal[0] = 1*(1-t.passBadEESupercrystalFilter) # Used, Data only
		passEcalLaserCorr[0] = 1*(t.passEcalLaserCorrFilter) # Used, Data only
		passHcalLaserEvent[0] = 1*(1-t.passHcalLaserEventFilter) # Used, Data only
		passPhysDeclared[0] = 1*(t.isPhysDeclared)
	else:
		pass_HLTMu40_eta2p1[0] = PassTrigger(t,["HLT_Mu40_eta2p1_v"],1)
		passTriggerObjectMatching[0] = 1
		passBPTX0[0] = 1
		passBeamScraping[0] = 1
		passTrackingFailure[0] = 1
		passBadEESuperCrystal[0] = 1
		passEcalLaserCorr[0] = 1
		passHcalLaserEvent[0] = 1
		passPhysDeclared[0] = 1
	passPrimaryVertex[0] = 1*(t.isPrimaryVertex) # checked, data+MC
	passHBHENoiseFilter[0] = 1*(t.passHBHENoiseFilter) # checked, data+MC
	passBeamHalo[0] = 1*(t.passBeamHaloFilterTight) # checked, data+MC
	passEcalDeadCellBE[0] = 1*(1-t.passEcalDeadCellBoundaryEnergyFilter) # Checked, data + MC
	passEcalDeadCellTP[0] = 1*(1-t.passEcalDeadCellTriggerPrimitiveFilter) # Checked, data + MC
	passDataCert[0] = 1
	if ( (t.isData==True) and (CheckRunLumiCert(t.run,lumisection[0]) == False) ) : passDataCert[0] = 0
	## =========================== Calculate everything! ============================= ##
	# Looping over systematic variations
	for v in _variations:
		# All calucations are done here
		calculations = FullKinematicCalculation(t,v)
		# Now cleverly cast the variables
		for b in range(len(_kinematicvariables)):
			exec(_kinematicvariables[b]+v+'[0] = calculations['+str(b)+']')
	## =========================== Skim out events ============================= ##
	# Feel like skimming? Do it here. The syntax is just {branchname}[0] > blah, or whatever condition
	# you want to impose. The [0] is because pyroot passes everything as an array of length 1.
	# BE MINDFUL: Just because the central (non-systematic) quantity meets the skim, does not mean
	# that the systematic varied quantity will, and that will throw off systematics calculations later.
	# Make sure your skim is looser than any selection you will need afterward!
	if (Pt_muon1[0] < 42): continue
	if nonisoswitch != True:
		if (Pt_muon2[0] < 42) and (Pt_miss[0] < 35): continue
	if (Pt_jet1[0] < 110): continue
	if (Pt_jet2[0] < 40): continue
	if (St_uujj[0] < 250) and (St_uvjj[0] < 250): continue
	# Fill output tree with event
	tout.Fill()
# All done. Write and close file.
tout.Write()
fout.Close()
# Timing, for debugging and optimization
print(datetime.now()-startTime)
print ('mv '+tmpfout+' '+finalfout)
os.system('mv '+tmpfout+' '+finalfout)
os.system('rm '+junkfile1)
os.system('rm '+junkfile2)
| [
""
] | |
840a5373373a5c2a4d1b1753bc46c5cc17f1a2c9 | 042bdb1872d8cfe6bac371858073b699eed4dbfd | /Menu2Recipe/lib/attribute.py | 72b322204c21ae95ab52fdb451a06f8a3b03833b | [] | no_license | denglixi/Menu2Recipe | f2abdea4b2f18745bbe085e9780a19cc63757c21 | 7c60d452bc6b3d8d0d05f97b1e8ce69b817d55b9 | refs/heads/master | 2020-08-03T07:44:56.048734 | 2020-02-26T06:17:14 | 2020-02-26T06:17:14 | 211,673,029 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,295 | py | import json
import re
import pandas as pd
def read_attribute_from_csv(attribute_path):
    """Parse the recipe-attribute CSV into {attribute-kind: [values]} lists.

    Columns used: entity, shape, taste, cooking method, other, type.
    `entity` cells may hold several values joined by ASCII or full-width commas.
    """
    # Fix: `error_bad_lines` was removed in pandas 2.0; `on_bad_lines='skip'`
    # (pandas >= 1.3) is the equivalent "silently drop malformed rows" option.
    attribute_data = pd.read_csv(attribute_path, on_bad_lines='skip', encoding='utf-8')
    # Only the first 6000 rows are curated.
    attribute_data = attribute_data[:6000]
    # id, source, food name, entity, shape, taste, cooking method, other, type
    attr_entity_set = set()
    for entities in attribute_data['entity']:
        try:
            # Split on both ',' and the full-width ',' used in Chinese text.
            for e in re.split('[,,]', entities):
                attr_entity_set.add(e)
        except TypeError:
            # NaN / non-string cells cannot be split; skip them (as before,
            # but without the bare `except:` that hid real errors).
            continue
    attr_entity = list(attr_entity_set)
    attr_shape = list(set(attribute_data['shape']))
    attr_taste = list(set(attribute_data['taste']))
    attr_cook = list(set(attribute_data['cooking method']))
    # Portion-size tokens and brackets are always valid "other" attributes.
    attr_other = list(set(attribute_data['other']) | {'大', '小', '大份', '小份', '大杯', '小杯', '(', ')', '(', ')'})
    attr_type = list(set(attribute_data['type']))
    attributes = {'entity': attr_entity,
                  'shape': attr_shape,
                  'taste': attr_taste,
                  'cooking method': attr_cook,
                  'other': attr_other,
                  'type': attr_type}
    return attributes
def write_attribute_to_json(attributes, attribute_json_path='./sources/attributes.json'):
    """Serialize the attribute vocabulary to JSON (non-ASCII kept readable).

    Generalized: the output path, previously hard-coded inside the function,
    is now a parameter whose default preserves the original behaviour.
    """
    with open(attribute_json_path, 'w', encoding='utf-8') as f:
        json.dump(attributes, f, ensure_ascii=False)
def load_attribute_from_json(attr_json_path):
    """Load the cached attribute vocabulary and convert each value list to a set."""
    with open(attr_json_path, 'r', encoding='utf-8') as fin:
        raw = json.load(fin)
    return {key: set(values) for key, values in raw.items()}
if __name__ == '__main__':
    # Build the attribute vocabulary from the raw CSV and cache it as JSON.
    attribute_path = './sources/Attribute/recipe_attribute.csv'
    attributes = read_attribute_from_csv(attribute_path)
    write_attribute_to_json(attributes)
| [
"denglixi.cs@gmail.com"
] | denglixi.cs@gmail.com |
6fe1fbf9bd37922b103cd568ec4742b8c96c13e6 | b36440e3a824e13a1fb02c97bed9f3cd26e7bcac | /Цикл while/python while/A.py | 82ba090b84749962fa2b30f6be046a3134d6f487 | [] | no_license | StaroKep/Python | 6f0af02a18a91bc2d0adaeb66e775eb0de329480 | 9c387bfacb0c80545a306edced4a0568b4a51068 | refs/heads/master | 2021-05-13T17:12:10.838734 | 2020-05-09T07:49:02 | 2020-05-09T07:49:02 | 116,814,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | n = int(input())
# Print every perfect square not exceeding n (read from stdin above),
# space-separated on one line.
i = 1
while(i**2 <= n):
    print(i**2, end=' ')
    i += 1
| [
"kirill.eremins@gmail.com"
] | kirill.eremins@gmail.com |
a09763f482d6d5084cb5c84d19ff3eb0a1ff88e6 | 98b99b048e678b713bca6c132a98817672c3a7c6 | /ex13.py | af403c6054a052ee86abe2ad740a6a2395488c53 | [] | no_license | jfletcher/lpthw | 13374fe5561d5bac13c3cbe8b145c501bc875619 | 1a9494a799c178d91b34d083b7d9690957d515da | refs/heads/master | 2021-03-12T23:43:33.184415 | 2014-10-10T14:13:02 | 2014-10-10T14:13:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from sys import argv
# Unpack the script name and exactly three command-line arguments
# (Python 2 print statements; raises ValueError unless 3 args are given).
script, dogs, cats, birds = argv
print "The script is called:", script
print "Your first variable is called:", dogs
print "Your second variable is called:", cats
print "Your third variable is called:", birds
"jamiemfletcher@gmail.com"
] | jamiemfletcher@gmail.com |
983ccfa9baf0620bdd9be3d518892d35ae0a5317 | 358de8b4d12a57405e47826547052bc9afd804f8 | /common/__init__.py | d0caaf7b31cd279f73708f36d0bb9622969bc7ec | [] | no_license | flagrama/guessing-game | bae1071b3aa0116f64daf7519f470294863294d5 | 6613776c878a6266bf7be5979dfd87e87701ab05 | refs/heads/dev | 2022-02-26T07:18:01.138238 | 2019-04-19T05:07:45 | 2019-04-19T05:07:45 | 168,276,745 | 0 | 0 | null | 2019-10-21T18:40:00 | 2019-01-30T04:01:45 | Python | UTF-8 | Python | false | false | 117 | py | from . import listeners
# Re-export the listener classes at package level so callers can import them
# directly from this package instead of reaching into the submodule.
SubscriptionListener = listeners.SubscriptionListener
ListListener = listeners.ListListener
| [
"v.cunningham@flagrama.com"
] | v.cunningham@flagrama.com |
80942bf10851ef7ec9d1f1bd97721c31500f2bfd | e8bdb331b2b753beeaf7ef973fc5eafa0e36b3d3 | /python/snippet.py | 2646ce0fd2b8882489659382e46b5fe900272f34 | [] | no_license | ZiminPark/CodeSnippet | 3a3bb1a390b4fb588b0f0d457cdd8b1aa6c7694b | 6ed1813084ee21c80886009920823f1bff7670d1 | refs/heads/main | 2023-02-17T22:22:42.092981 | 2021-01-22T00:57:24 | 2021-01-22T00:57:24 | 304,079,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | import os
from random import random
import numpy as np
def set_python_seed(seed: int) -> None:
    """Seed Python's ``random``, the hash-seed env var, and NumPy for reproducibility.

    Fix: the module does ``from random import random``, which binds the
    *function* ``random`` at module scope, so ``random.seed(...)`` raised
    AttributeError. Import the module locally under a private name instead.
    """
    import random as _random_mod
    _random_mod.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
| [
"48933016+ZiminPark@users.noreply.github.com"
] | 48933016+ZiminPark@users.noreply.github.com |
db7981da24fec5ec3aad85b65db3fefcb9c98ca8 | 6b81d527f1d92cec060f393d33d79381081e4837 | /pythonchallenge05.py | df22bedf2c49628516c4fef300abfdfe33eb5540 | [] | no_license | jlyu/Pythonchallenge | 31491576611e693bfaa85081092efab8799bfcf0 | 09f51aeafd80cb6de82bc70aab813b5a4ec03e58 | refs/heads/master | 2020-04-20T12:02:41.999067 | 2012-04-19T06:28:59 | 2012-04-19T06:28:59 | 4,070,481 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | import cPickle as p
# Python Challenge level 5: unpickle a run-length-encoded banner and render it.
# `content` is a list of rows; each row is a list of (char, count) pairs.
# (Python 2 only: uses the removed `file()` builtin and cPickle.)
content = p.load(file('banner.p'))
f=file('result.txt','w')
for line in content:
    # Expand each (char, count) pair and join into one text row (CRLF-terminated).
    f.write(''.join(map(lambda x:x[0]*x[1], line))+'\r\n')
f.close()
| [
"chain119@ymail.com"
] | chain119@ymail.com |
7f36dfe12b290d0c7e0a59226859e5c910bd361b | caa16b9e3aa58e75a1885b540d7ea7979bad0920 | /Lesson2/task_2_substring.py | 3a477e12d9e0563a9e1917e9511a6847bc7f6954 | [
"MIT"
] | permissive | LPetrova/Python | 91f239b4947cce2b6591191b664ba966045dec5a | 0be5939ecfc5f0fecce33fee314bfe534aed8efd | refs/heads/master | 2020-12-24T20:32:58.960177 | 2016-05-23T08:37:35 | 2016-05-23T08:37:35 | 59,465,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | first_text = input('Enter first string: ')
second_text = input('Enter second string: ')
# Find the first occurrence of the second string inside the first one.
find_text = first_text.find(second_text)
if find_text == -1:
    # Not found: echo the first string unchanged.
    print(first_text)
else:
    # Found: print everything after the first occurrence.
    print(first_text[find_text + len(second_text):])
| [
"lidiq880908@gmail.com"
] | lidiq880908@gmail.com |
316c071aa0f42d4c8a145727f39807d9c5472d9f | f7bf605fc61fb32fee6f1bcb876412528cabdfe2 | /models/helper.py | 43c1fc3ec96aa7735d208658eff52a39460617ce | [] | no_license | dameyerdave/mongo_change_tracker | ee1519b537a21c09848864dcca5b478cd84aaaf0 | e4dd00f3f5810dbc7590efc05fb365f4ebc5c959 | refs/heads/master | 2023-05-30T15:37:51.038932 | 2021-06-01T13:52:59 | 2021-06-01T13:52:59 | 315,612,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | def flatten_json(y):
    # Accumulates the flattened result: dotted-path key -> leaf value.
    out = {}
    def flatten(x, name=''):
        # Recursively walk dicts (keyed by field name) and lists (keyed by
        # index), building dot-separated paths; leaves are stored in `out`.
        if type(x) is dict:
            for a in x:
                flatten(x[a], name + a + '.')
        elif type(x) is list:
            i = 0
            for a in x:
                flatten(a, name + str(i) + '.')
                i += 1
        else:
            # name always ends with a trailing '.'; strip it for the final key.
            out[name[:-1]] = x
    flatten(y)
    return out
| [
"dameyerdave@gmail.com"
] | dameyerdave@gmail.com |
39f10dc8fd74b3886a2d525993e7c0f9b3061dbd | 697e762b65b091cdc95250bbb638b20fdd8f542c | /greysqale/pool.py | 97ddbe05844498579be51adc922dc03b07659723 | [] | no_license | clueless-skywatcher/greysqale | 6c6df519b1ff5a8551f9a9338bd30351270dead9 | cb4bfff108505dd71598cd38e0ba7359168ed4d5 | refs/heads/main | 2023-07-01T21:10:48.608869 | 2021-07-22T05:40:22 | 2021-07-22T05:40:22 | 387,848,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | from psycopg2.pool import SimpleConnectionPool, ThreadedConnectionPool
class GSQLPoolmaker:
def __init__(self, minconn, maxconn, db, user, password, host, threaded = False) -> None:
self.minconn = minconn
self.maxconn = maxconn
self.db = db
self.user = user
self.password = password
self.host = host
if not threaded:
self._conn_pool = SimpleConnectionPool(
minconn,
maxconn,
database = db,
user = user,
password = password,
host = host
)
else:
self._conn_pool = ThreadedConnectionPool(
minconn,
maxconn,
database = db,
user = user,
password = password,
host = host
)
    def get_connection(self):
        # Check a connection out of the underlying psycopg2 pool.
        return self._conn_pool.getconn()
def put_connection(self):
return self._conn_pool.putconn()
    def __enter__(self):
        # Context-manager entry: check out a connection and remember it so
        # __exit__ can return the same one to the pool.
        self.connection = self._conn_pool.getconn()
        return self.connection
def __exit__(self, ):
self._conn_pool.putconn(self.connection) | [
"somichat@gmail.com"
] | somichat@gmail.com |
4aeb41f78d945aca9081a90c0cbaee43ac0f7f36 | b688fe5c3b565067bd84a7b6d0b9d00971a0bfa0 | /wow_python/wow/src/wow.py | b7e6c8d16c8a64c1fd664a4aaa9ce28f431dbfa7 | [] | no_license | timotheeguerin/archive | d81e830b688d7e690d18a744053ef9afe2afc799 | 0dbe1a734d499b52d21955d2fc342d1cc15693ae | refs/heads/master | 2021-05-28T02:20:45.391697 | 2014-06-19T21:41:15 | 2014-06-19T21:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | from src.action.build.builder import Builder
from src.action.push.uploader import Uploader
from src.action.extract.extractor import Extractor
from src.action.uninstall.uninstaller import Uninstaller
class Wow:
options = {}
def run(self, options):
self.options = options
if options['extract']:
self.extract()
elif options['install']:
self.install()
elif options['uninstall']:
self.uninstall()
elif options['build']:
self.build()
elif options['push']:
self.push()
elif options['use']:
self.use()
    # Extract a .wow archive named on the command line.
    def extract(self):
        extractor = Extractor()
        extractor.extract(self.options['<filename>'])
    # Download and install a package. NOTE(review): placeholder - only prints.
    def install(self):
        print('install')
    # Remove each package named on the command line.
    def uninstall(self):
        uninstaller = Uninstaller()
        for package in self.options['<package>']:
            uninstaller.uninstall(package)
    # Build the project, optionally for an explicit target platform.
    def build(self):
        print('Building')
        builder = Builder()
        if self.options['<platform>'] is not None:
            builder.platform = self.options['<platform>']
        builder.build()
    # Upload the first file named on the command line.
    def push(self):
        uploader = Uploader()
        uploader.upload(self.options['<file>'][0])
    # Switch to a specific package version. NOTE(review): placeholder - only prints.
    def use(self):
print('Using version x of package y') | [
"timothee.guerin@outlook.com"
] | timothee.guerin@outlook.com |
156a46d0724c96b05f4385c7cfff71c90aef2f0d | 374c4e05e61fc5af136705e06e219c93d131acb9 | /Undergrad/CK0154 - SISTEMAS DISTRIBUÍDOS/SocketVsMQTT/Sockets/client_sensors.py | b04d977f3da10761d392ff3f3445ee2f9e222f40 | [] | no_license | TioMinho/UFC_Courses | af8b3ad1f9ea79f7ff2ff5ce93321436b05e269b | 66f8c6f068b0e1b9612d7984db1da3cdb9fc01c0 | refs/heads/master | 2020-07-12T05:56:11.825637 | 2020-02-09T04:31:09 | 2020-02-09T04:31:09 | 204,736,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from equips.Sensor import Sensor
from random import random, uniform
from time import sleep
# Simulated home sensors: (kind, room, initial value, unit, [min, max] range,
# server host, server port). All report to the same local socket server.
sensors = []
sensors.append(Sensor("Temperatura", "Sala", 24.0, "C", [-99,99], "127.0.0.1", 33000))
sensors.append(Sensor("Temperatura", "Sala", 24.0, "C", [-99,99], "127.0.0.1", 33000))
sensors.append(Sensor("Temperatura", "Cozinha", 24.0, "C", [-99,99], "127.0.0.1", 33000))
sensors.append(Sensor("Água", "Cozinha", 20, "L", [0,20], "127.0.0.1", 33000))
sensors.append(Sensor("Gás", "Cozinha", 0, "ppm", [0,100], "127.0.0.1", 33000))
sensors.append(Sensor("Temperatura", "Quarto", 24.0, "C", [-99,99], "127.0.0.1", 33000))
sensors.append(Sensor("Temperatura", "Entrada", 24.0, "C", [-99,99], "127.0.0.1", 33000))
sensors.append(Sensor("Presença", "Entrada", 0, "", [0,1], "127.0.0.1", 33000))
sensors.append(Sensor("Temperatura", "Banheiro", 24.0, "C", [-99,99], "127.0.0.1", 33000))
try:
while True:
sleep(.5)
for s in sensors:
if(random() < 0.5):
s.sensor_var = s.sensor_var+uniform(-0.01,0.01)
s.clipper()
except KeyboardInterrupt:
print("Exiting...")
for s in sensors:
S.client_socket.close() | [
"minhotmog@gmail.com"
] | minhotmog@gmail.com |
7abf3ba062755a3465f207dc24afbd08d30c8e91 | c00138a1e147aaec6047b763dd9747f54575b7ee | /config.py | 0d308794f842b81c6f9a972a3f662d164f064f09 | [] | no_license | hjalmar99/review-site | 50bf49b331b278780fd58b57ecc5d7bab64f1df6 | b2a88ce4e7c3965a0c10b8b69dadb0cff6849556 | refs/heads/master | 2021-01-24T20:40:12.614920 | 2018-05-25T13:19:30 | 2018-05-25T13:19:30 | 123,255,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import os
# Directory containing this config file; used to anchor the SQLite path.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config():
    # Base Flask configuration shared by all environments.
    # Falls back to a hard-coded secret only when SECRET_KEY is unset.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'very hard to guess string'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    @staticmethod
    def init_app(app):
        # Hook for environment-specific app initialization; base does nothing.
        pass
class DevelopmentConfig(Config):
    # Local development: debug mode with an on-disk SQLite database.
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'db.sqlite')
class TestingConfig(Config):
    TESTING = True
class ProductionConfig(Config):
    # NOTE(review): identical to the base config for now.
    pass
config = {
    # Registry mapping config names (e.g. FLASK_CONFIG) to their classes.
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| [
"hjalmarniemi@hotmail.com"
] | hjalmarniemi@hotmail.com |
0a94b29872442b7ac0be1a53efbe3f54e5eb093c | 0220a954c8d85532cb8a1e3e010760fd3ce3fc3f | /A3-COVID19-Tracker/src/config.py | cf6eff99b908004e2f1a090706db72907d58d236 | [] | no_license | pchen2134/ECE1779-Cloud-Computing | 1abd530467ca0f75505ac680fbb58654aa17be9e | cb1fcad277f16b29d010d4ff0a9f98d60fec25b1 | refs/heads/master | 2022-12-08T00:31:38.577376 | 2020-08-26T01:15:40 | 2020-08-26T01:15:40 | 290,349,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | SECRET_KEY = 'A3'
# REST endpoints of the corona.lmao.ninja (NovelCOVID) API used by the tracker.
COUNTRY_STAT_URL = 'https://corona.lmao.ninja/countries?sort=country'
GLOBAL_STAT_URL = 'https://corona.lmao.ninja/v2/all'
# NOTE(review): the MAPBOX_TOKEN on the next line is a credential hard-coded
# into the repository - it should be rotated and loaded from the environment.
MAPBOX_TOKEN = 'pk.eyJ1IjoicGNoZW5nMTEiLCJhIjoiY2s4d2lpMDE0MHVsbzNlcWhvaTUwbWJvaCJ9.EokT9cbv_067a6-jMCUO4w' | [
"noreply@github.com"
] | pchen2134.noreply@github.com |
7de09a9222326952cb90dafda60f11d2ddd3059b | 6dbdf2fba718f909116e5df14c6681cf3b7caa4f | /src/predict/Gradient_Boosting_Regressor.py | 41d9c94cba95f6119b5e294c36b4e366bd29c104 | [] | no_license | abtpst/Home-Depot | fd30999ae4154f3d92803c6833679e19e5b1fc47 | 326af73d6fe0289bde4b6cc73342882b0615b02a | refs/heads/master | 2021-01-18T23:59:25.953140 | 2016-11-17T16:52:51 | 2016-11-17T16:52:51 | 72,806,177 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | '''
Created on Oct 31, 2016
@author: abhijit.tomar
'''
import warnings
warnings.filterwarnings('ignore')
import time
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import make_scorer
from sklearn import grid_search
import random
random.seed(2016)
import Helper_Tools
import json
RMSE = make_scorer(Helper_Tools.custom_mean_squared_error, greater_is_better=False)
def fit_predict_gbr():
    """Train a GradientBoostingRegressor on the Home Depot relevance features
    and write a Kaggle-style submission CSV.

    Loads the engineered feature frame via Helper_Tools, fits a gradient
    boosting model, clips predictions into the valid relevance range
    [1.0, 3.0] and saves the (id, relevance) pairs under resources/results/.
    """
    start_time = time.time()
    # Load the features/attributes
    X_train, y_train, X_test, id_test = Helper_Tools.generate_train_test_splits('../../resources/data/dframes/final.csv')
    print('--- Features Set: %s minutes ---' % round(((time.time() - start_time) / 60), 2))
    print('Number of Features: ', len(X_train.columns.tolist()))
    print(X_train.columns.tolist())
    # Hyper-parameters (max_features=100, n_estimators=500, subsample=0.8)
    # were selected by an earlier GridSearchCV sweep scored with the custom
    # RMSE scorer defined at module level.
    gbr = GradientBoostingRegressor(random_state=2016, max_features=100, n_estimators=500, verbose=100, subsample=0.8)
    gbr.fit(X_train, y_train)
    y_pred = gbr.predict(X_test)
    # Relevance labels are defined on [1.0, 3.0]; clip the whole array at once
    # instead of clamping element-by-element in a Python loop.
    y_pred = y_pred.clip(1.0, 3.0)
    # Save the submission
    pd.DataFrame({'id': id_test, 'relevance': y_pred}).to_csv('../../resources/results/' + type(gbr).__name__ + 'submission.csv', index=False)
    print('--- Submission Generated: %s minutes ---' % round(((time.time() - start_time) / 60), 2))
fit_predict_gbr() | [
"abhijit.tomar@Abhijits-MacBook-Pro.local"
] | abhijit.tomar@Abhijits-MacBook-Pro.local |
24728b5f65466078cfed81969e8b513e5db0886a | 0f94ebbee84cda7e8f87c2369ee48280dc144752 | /Algorithm/spsict_test/test5.py | 19c8baec19b2e3734a1f3ca1c4ff5e974bd0dbce | [
"MIT"
def solution(participant, completion):
    """Return the one participant who has no matching entry in ``completion``.

    Classic "marathon non-finisher" problem: ``completion`` contains every
    name in ``participant`` except exactly one, with duplicates counted per
    occurrence. Returns "" when every participant completed.
    """
    from collections import Counter  # local import keeps module-level deps unchanged
    # Multiset difference cancels matching occurrences, leaving only the
    # single non-finisher (Counter subtraction drops non-positive counts).
    remaining = Counter(participant) - Counter(completion)
    return next(iter(remaining), "")
# Example from the problem statement; expected output: "mislav".
participant = ['mislav', 'stanko', 'mislav', 'ana' ]
completion = ['stanko', 'mislav', 'ana' ]
print( solution( participant, completion ) )
| [
"zionhan0708@gmail.com"
] | zionhan0708@gmail.com |
db90f94cd66581b090cd255f3e2bded1ea8a0beb | 5d3b2d500ebad108d8f340fb08beee597ec31133 | /details.py | 71c2e1dc1a4b165d202ea564c6e4a0b8041eeb1f | [] | no_license | chrisjpurdy/scalping_bot | d7e9948e16a1b787ce44aeed16e8f8bbc93bfcc9 | bd02bb6c2b978fa15a53b59946a06abab56c39b6 | refs/heads/main | 2023-04-27T06:09:29.861310 | 2021-05-12T16:16:11 | 2021-05-12T16:16:11 | 366,775,585 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | # Fill these with IG developer details
IG_USERNAME = ""
IG_API_KEY = ""
IG_PASSWORD = "" | [
"psycjpu@exmail.nottingham.ac.uk"
] | psycjpu@exmail.nottingham.ac.uk |
df3d824b534e4d65e2aea4874bd8c57c0fa1cddf | 28739dd69d94f676a106404135cdd87e72fd1df7 | /LeetCode/151_Reverse_Words_in_a_String.py | 26b584cb372db94a5c61366ba6bc37ce9cf75db0 | [] | no_license | HappyRocky/pythonAI | 74962bbf7e88b0f968214062ea5de8bc79d2c158 | a1e624f0afc24ea5f159fa66fed178aa61bb0179 | refs/heads/master | 2022-12-09T15:03:02.426104 | 2020-10-29T01:56:47 | 2020-10-29T01:56:47 | 115,279,044 | 2 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,752 | py | # -*- coding: utf-8 -*-
"""
Given an input string, reverse the string word by word.
Note:
A word is defined as a sequence of non-space characters.
Input string may contain leading or trailing spaces. However, your reversed string should not contain leading or trailing spaces.
You need to reduce multiple spaces between two words to a single space in the reversed string.
给定一个输入字符串,将字符串以单词为单位进行反转。
备注:
一个单词指的是中间没有空格的字符序列。
输入字符串可能会包含前缀空格或者后缀空格。但是,反转后的字符串不能包含前缀空格或后缀空格。
你需要在反转后的字符串中将多个连续空格合并成单个空格。
Example 1:
Input: "the sky is blue"
Output: "blue is sky the"
Example 2:
Input: " hello world! "
Output: "world! hello"
Explanation: Your reversed string should not contain leading or trailing spaces.
Example 3:
Input: "a good example"
Output: "example good a"
Explanation: You need to reduce multiple spaces between two words to a single space in the reversed string.
"""
def reverseWords(s: str) -> str:
    """Return the words of *s* in reverse order, joined by single spaces.

    Leading/trailing whitespace is dropped and runs of spaces between words
    collapse to a single space, per the LeetCode 151 contract.
    """
    # str.split() with no argument splits on arbitrary whitespace and
    # discards empty fields, which implements all three normalisation
    # rules in one step.
    return ' '.join(reversed(s.split()))
# Quick manual check when the module is executed directly.
if '__main__' == __name__:
    s = "the sky is blue"
    print(reverseWords(s))
| [
"gongyanshang1@jd.com"
] | gongyanshang1@jd.com |
f9a6eabc73126f9743000ab270f10142912b7210 | e5e3f0609704b2eba76648d7e51106f2a3c9aa53 | /tests/test_assert_remover.py | 1a1ee0134588c8c61d2d77dea9e359028f150462 | [] | no_license | leonardt/ast_tools | 43aca6e6f849f028397d861991e532b52a8c664a | 3cf9151463db286d7ec16de1ba73062625f5045d | refs/heads/master | 2022-06-20T07:19:25.225353 | 2022-06-14T20:31:42 | 2022-06-14T20:31:42 | 198,655,150 | 17 | 7 | null | 2023-09-13T22:24:35 | 2019-07-24T14:46:57 | Python | UTF-8 | Python | false | false | 418 | py | from ast_tools.passes import apply_passes, remove_asserts
import inspect
def test_remove_asserts():
    # remove_asserts should rewrite every `assert` -- including one that
    # would raise NameError if it were ever executed -- into a bare `pass`.
    @apply_passes([remove_asserts()])
    def foo():
        if True:
            assert False
        for i in range(10):
            assert i == 0
        assert name_error
    # Must not raise: all asserts were stripped by the pass.
    foo()
    # Source comparison is verbatim, so the expected literal's exact
    # indentation and trailing newline are significant.
    assert inspect.getsource(foo) == f'''\
def foo():
    if True:
        pass
    for i in range(10):
        pass
    pass
'''
| [
"donovick@cs.stanford.edu"
] | donovick@cs.stanford.edu |
68d80aabd964ecc0c03b3c58dbb4409ea535aea0 | 9381d2a25adac95fab9fc4b8015aadd6c7bed6ca | /ITP1/8_A.py | cf8098d7b4993f62b0cc1f7fe90d16e436e1b142 | [] | no_license | kazuma104/AOJ | e3ca14bd31167656bcd203d4f92a43fd4045434c | d91cc3313cbfa575928787677e5ed6be63aa8acf | refs/heads/master | 2023-03-20T22:16:22.764351 | 2021-03-18T10:38:08 | 2021-03-18T10:38:08 | 262,047,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | def solve():
S = input()
T = ""
for i in range(len(S)):
if S[i] >= 'a':
T += S[i].upper()
else:
T += S[i].lower()
print(T)
# Standard script entry point.
if __name__ == '__main__':
    solve()
"kazuma@info.nara-k.ac.jp"
] | kazuma@info.nara-k.ac.jp |
ee9de5bfd33d645b63200dc5993c74631349a6e6 | 62be73d655965905202fd859131ebe3effd9c823 | /onewireMonitor_py3.py | 4aa09fbd466b9e4090872a24156a47d44c03897a | [] | no_license | SkullKill/onewireMonitor | 3b930bba2d2b6796100b86a490e398db90b93000 | c763207a679483b77069d24e78cd94081970f913 | refs/heads/master | 2020-07-17T08:08:10.654482 | 2019-09-03T03:58:57 | 2019-09-03T03:58:57 | 205,980,572 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | #!/usr/bin/python3
#########################################################################################
#
# oneWireMonitor
#
# Written by Simon Kong for the Raspberry Pi
# V1.0 03/09/2019
#
# Monitor onewire temp directory and hard reset power to the DS18B20 if folder is not present.
import signal
import RPi.GPIO as GPIO
import time
import os
import datetime
def handle_exit(sig, frame):
    """Turn an incoming kill signal into SystemExit.

    Registered for SIGTERM below so the module-level ``finally`` block gets
    a chance to release the GPIO pins on external termination.
    """
    raise SystemExit
# Handle kill signal
def setup_OSsignal():
    """Register handle_exit as the process's SIGTERM handler."""
    signal.signal(signal.SIGTERM, handle_exit)
# Print the start-up banner, then install the handler before entering the
# monitoring loop below.
print("\n\n{} - starting OneWire monitor".format(datetime.datetime.now()))
setup_OSsignal()
try:
    GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
    GPIO.setup(17, GPIO.OUT)  # GPIO17 switches power to the DS18B20 sensor
    while True:
        # log when the next cycle is starting
        print("{} - Starting new cycle".format(datetime.datetime.now()))
        ##############################################################################################
        # change this according to the folder / DS18B20 serial to monitor ****************************
        # If the 1-Wire device folder is missing, hard-reset the sensor by
        # pulling its power pin low for 3 seconds (see module header).
        if (os.path.isdir("/sys/bus/w1/devices/28-XXXXXXXXXXXX") == False):
            print("{} - Reseting OneWire".format(datetime.datetime.now()))
            GPIO.output(17, GPIO.LOW)
            time.sleep(3)
            GPIO.output(17, GPIO.HIGH)
            #time.sleep(5)
        # sleep for 50 sec before the next check
        time.sleep(50)
except KeyboardInterrupt:
    print("Keyboard Inturrupt detected")
except SystemExit:
    print("kill signal detected")
except:  # NOTE(review): bare except also swallows unexpected errors with a generic message
    print("Some other error detected")
finally:
    # either way, release the GPIO pins before exit
    print("{} - cleanning up GPIO pins".format(datetime.datetime.now()))
    GPIO.cleanup()
"noreply@github.com"
] | SkullKill.noreply@github.com |
755c8410856fd9a634ed73e87e50ec135313c22b | 1f3bed0bb480a7d163dab73f1d315741ecbc1072 | /vtkplotter_examples/pyplot/plot7_stream.py | 04cb003af2b8799fd539ccebed6d1317312814c5 | [
"MIT"
] | permissive | ismarou/vtkplotter-examples | 1ce78197182da7496b016b27f1d5eb524c49cac6 | 1eefcc026be169ab7a77a5bce6dec8044c33b554 | refs/heads/master | 2021-03-11T18:43:22.313457 | 2020-03-03T22:11:25 | 2020-03-03T22:11:25 | 246,551,341 | 4 | 0 | null | 2020-03-11T11:18:48 | 2020-03-11T11:18:47 | null | UTF-8 | Python | false | false | 672 | py | """Plot streamlines of the 2D field:
u(x,y) = -1 - x^2 + y
v(x,y) = 1 + x - y^2
"""
from vtkplotter import *
import numpy as np
# a grid with a vector field (U,V):
X, Y = np.mgrid[-5:5 :15j, -4:4 :15j]
U = -1 - X**2 + Y
V = 1 + X - Y**2
# optionally, pick some random points as seeds:
# (200 uniform points in the [-4, 4] x [-4, 4] square)
prob_pts = np.random.rand(200, 2)*8 - [4,4]
sp = streamplot(X,Y, U,V,
                lw=0.001, # line width in abs. units
                direction='forward', # 'both' or 'backward'
                probes=prob_pts, # try comment out this
                )
# white markers at the seed locations
pts = Points(prob_pts, r=5, c='white')
show(sp, pts,
     Text2D(__doc__, c='w'),
     axes=1, bg='bb')
| [
"marco.musy@gmail.com"
] | marco.musy@gmail.com |
fcdb40fc418ba887744f4884cf220c01f70a7fe4 | 0a1175e7c117161be37332f5377eb3cbb9704d92 | /tests/test_args.py | fadd6d50df1c4cc86259f470d65909da899d3b22 | [
"BSD-2-Clause"
] | permissive | charnley/rmsd | 3897276339f32f59557852d76ee14646e535816e | dcca78e7abb4ab605871074044ceb0ae31ba9409 | refs/heads/master | 2023-04-08T23:23:44.450248 | 2023-01-06T17:03:02 | 2023-01-06T17:03:02 | 9,659,218 | 432 | 133 | BSD-2-Clause | 2023-01-06T17:01:22 | 2013-04-24T22:46:14 | Python | UTF-8 | Python | false | false | 2,096 | py | import pytest
from rmsd import calculate_rmsd
def test_formats() -> None:
    # Gzipped .xyz inputs should set the gzip-format flag on the parsed args.
    args_ = "filename.xyz.gz filename2.xyz.gz".split()
    args = calculate_rmsd.parse_arguments(args_)
    assert args.format_is_gzip
def test_legal_arguments() -> None:
    # A valid flag combination parses cleanly with the expected defaults.
    args_ = "--rotation kabsch --ignore-hydrogen FILE_A FILE_B".split()
    args = calculate_rmsd.parse_arguments(args_)
    assert args.reorder is False
    assert args.ignore_hydrogen is True
    assert args.rotation == "kabsch"
def test_illegal_arguments() -> None:
    # Each incompatible flag combination must make the argument parser bail
    # out with SystemExit before any RMSD work happens.
    with pytest.raises(SystemExit):
        args = calculate_rmsd.parse_arguments(
            "--reorder --ignore-hydrogen --print filea fileb".split()
        )
        print(args)
    with pytest.raises(SystemExit):
        args = calculate_rmsd.parse_arguments(
            "--print --ignore-hydrogen --use-reflections filea fileb".split()
        )
        print(args)
    with pytest.raises(SystemExit):
        args = calculate_rmsd.parse_arguments("--rotation do-not-exists filea fileb".split())
        print(args)
    with pytest.raises(SystemExit):
        args = calculate_rmsd.parse_arguments(
            "--reorder --reorder-method do-not-exists filea fileb".split()
        )
        print(args)
def test_illegal_reflection() -> None:
    # This flag combination is rejected while parsing command-line arguments.
    args = [
        "--rotation kabsch",
        "--use-reflections",
        "--print",
        "--ignore-hydrogen",
        "FILE_A",
        "FILE_B",
    ]
    with pytest.raises(SystemExit) as exception:
        _ = calculate_rmsd.parse_arguments(args)
    assert exception.type == SystemExit
def test_illegal_rotation_method() -> None:
    # An unknown --rotation choice is rejected by the parser with SystemExit.
    args = ["--rotation NeverHeardOfThisMethod", "FILE_A", "FILE_B"]
    with pytest.raises(SystemExit) as exception:
        _ = calculate_rmsd.parse_arguments(args)
    assert exception.type == SystemExit
def test_illegal_reorder_method() -> None:
    # An unknown --reorder-method choice is likewise rejected with SystemExit.
    args = ["--reorder-method NotImplementedYet", "FILE_A", "FILE_B"]
    with pytest.raises(SystemExit) as exception:
        _ = calculate_rmsd.parse_arguments(args)
    assert exception.type == SystemExit
| [
"noreply@github.com"
] | charnley.noreply@github.com |
7c945592d39eb2f6680b846f93d8f8921188613c | 0ed9a8eef1d12587d596ec53842540063b58a7ec | /cloudrail/knowledge/rules/rules_loader.py | 443b78c547b07c45631f401fffc28e2ebc664574 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | cbc506/cloudrail-knowledge | 8611faa10a3bf195f277b81622e2590dbcc60da4 | 7b5c9030575f512b9c230eed1a93f568d8663708 | refs/heads/main | 2023-08-02T08:36:22.051695 | 2021-09-13T15:23:33 | 2021-09-13T15:24:26 | 390,127,361 | 0 | 0 | MIT | 2021-07-27T21:08:06 | 2021-07-27T21:08:06 | null | UTF-8 | Python | false | false | 1,710 | py | import functools
from typing import Dict, Optional
from cloudrail.knowledge.context.cloud_provider import CloudProvider
from cloudrail.knowledge.exceptions import UnsupportedCloudProviderException
from cloudrail.knowledge.rules.base_rule import BaseRule
from cloudrail.knowledge.rules.aws_rules_loader import AwsRulesLoader
from cloudrail.knowledge.rules.azure_rules_loader import AzureRulesLoader
from cloudrail.knowledge.rules.gcp_rules_loader import GcpRulesLoader
class RulesLoader:
    """Facade that dispatches to the per-cloud rule loaders (AWS/Azure/GCP)."""
    @classmethod
    def load(cls, cloud_provider: Optional[CloudProvider] = None) -> Dict[str, BaseRule]:
        """Return rules keyed by rule id for *cloud_provider*.

        With no provider given, the AWS, Azure and GCP rule sets are merged.
        Raises UnsupportedCloudProviderException for an unrecognised provider.
        """
        if not cloud_provider:
            return {**AwsRulesLoader().load(), **AzureRulesLoader().load(), **GcpRulesLoader().load()}
        if cloud_provider == CloudProvider.AMAZON_WEB_SERVICES:
            return AwsRulesLoader().load()
        if cloud_provider == CloudProvider.AZURE:
            return AzureRulesLoader().load()
        if cloud_provider == CloudProvider.GCP:
            return GcpRulesLoader().load()
        raise UnsupportedCloudProviderException(cloud_provider)
    @classmethod
    @functools.lru_cache(maxsize=None)
    def get_rules_source_control_links(cls) -> Dict[str, str]:
        """Map each rule id to its GitHub source link (computed once, cached).

        Rules whose module lives outside the cloudrail.knowledge package are
        skipped.
        """
        rules = cls.load()
        source_control_links = {}
        for rule_id, rule in rules.items():
            rule_module = rule.__module__
            if not rule_module.startswith('cloudrail.knowledge'):
                continue
            rule_path = rule_module.replace('.', '/')
            source_control_link = f'https://github.com/indeni/cloudrail-knowledge/blob/main/{rule_path}.py'
            source_control_links[rule_id] = source_control_link
        return source_control_links
| [
"ori.bar.emet@gmail.com"
] | ori.bar.emet@gmail.com |
5b13e1e596d6f5f2c2723a35aa5e5af013c3fe2c | 6205c3d3d6eb47d3b067dcca5d2e6a785a53cce4 | /handlers/Fork_of_Stops_or_Numbers.py | 7a2c9cbeae4e69b27acb6c8934533e1304caa1c7 | [] | no_license | Neferpitou1337/BusSheduleTelegram | fa538e958309f574a516e139dd42163a3dbe8125 | 49a78e92d9934ec1ae2f8a3cd95702bcf802fbd0 | refs/heads/master | 2022-12-20T01:49:07.767453 | 2020-10-18T13:32:50 | 2020-10-18T13:32:50 | 292,309,073 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,893 | py | import hashlib
import logging
from telebot import types
import RefreshDB
import etc
import userTableWorker
from etc import bot
"""
Fork
"""
# This helper lives here (rather than in backresetreplybutton) because
# importing it from there caused a circular-import error.
def GetBackResetMarkup():
    """Build the reply keyboard with the /back and /reset buttons."""
    markup = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=1)
    markup.row(types.KeyboardButton("/back"), types.KeyboardButton("/reset"))
    return markup
# get directions from table and make 2 buttons inside bot
@bot.message_handler(
    func=lambda message: userTableWorker.getState(message.chat.id) == etc.States.S_ENTER_NUMBER_OR_STOP.value,
    content_types=['text'])
def numberandStopHandler(message):
    """Entry fork: decide whether the user typed a bus number or a stop name.

    Bus numbers are delegated to numberHandler; stop names are matched (by a
    >=4-character prefix, or against a whitelist of short stop names) and
    delegated to stopsHandler.
    """
    # logging.info("%s is in fork", message.chat.id)
    stopslesserthan4 = ["БТИ", "ЦУМ", "ЦМТ", "БТК", "АП", "ТЭЦ", "ЦГБ", "ПСО", "ФОК", "ДСУ"]
    numberOfSymbolstoFindSimilar = 4
    # bail out while the database is being refreshed
    if RefreshDB.isRefreshing():
        bot.send_message(message.chat.id, text="Подождите пару минут, идет обновление базы данных")
        return 0
    if message.text.upper() not in etc.NUMBERS_OF_BUSES:
        if len(message.text) < numberOfSymbolstoFindSimilar:
            # too short for fuzzy matching: only exact short stop names pass
            if message.text.upper() not in stopslesserthan4:
                bot.reply_to(message,
                             "Попытайтесь написать русскими буквами или такой остановки нет, или такого номера автобуса не "
                             "существует, или Создатель не знает об их появлении.\n"
                             "Если же вы пытались искать по остановке, то нужно как минимум 4 символа")
            else:
                stopsHandler(message, [message.text.upper()])
        else:
            similarStops = userTableWorker.getSimilarStops(message.text)
            if similarStops == []:
                bot.reply_to(message, "Такой остановки не сущеставует")
            else:
                stopsHandler(message, similarStops)
    else:
        numberHandler(message)
def numberHandler(message):
    """Offer the two directions of the chosen bus route as inline buttons."""
    route = message.text.upper()
    dirs = userTableWorker.getDirections(route)
    # creation of direction buttons
    markup = types.InlineKeyboardMarkup()
    # callback_data is limited in size, so identify each direction by the
    # md5 hash of its name instead of the full text
    hash_dirs0 = hashlib.md5(dirs[0].encode())
    hash_dirs1 = hashlib.md5(dirs[1].encode())
    itembtn1 = types.InlineKeyboardButton(text=dirs[0], callback_data=hash_dirs0.hexdigest())
    itembtn2 = types.InlineKeyboardButton(text=dirs[1], callback_data=hash_dirs1.hexdigest())
    markup.add(itembtn1)
    markup.add(itembtn2)
    bot.send_message(message.chat.id, "Ответ сервера: ", reply_markup=GetBackResetMarkup())
    mess_id = bot.send_message(message.chat.id,message.text + "\nВыберите направление:", reply_markup=markup).message_id
    # updating table userdecision: remember route, new state and message id
    userTableWorker.setAll(message.chat.id, route, None, None, etc.States.S_CHOOSE_DIR.value, mess_id)
def stopsHandler(message, similarStops):
    """Offer the matched stop names as inline buttons and advance the state."""
    # creation of stops buttons
    markup = types.InlineKeyboardMarkup()
    for sS in similarStops:
        markup.add(types.InlineKeyboardButton(text=sS, callback_data=sS))
    bot.send_message(message.chat.id, "Ответ сервера: ", reply_markup=GetBackResetMarkup())
    mess_id = bot.send_message(message.chat.id, "\nВыберите остановку:", reply_markup=markup).message_id
    # updating table userdecision: remember new state and message id
    userTableWorker.setAll(message.chat.id, None, None, None, etc.States.S2_STOP_HANDLER.value, mess_id)
"isachenko.foxrider@yandex.by"
] | isachenko.foxrider@yandex.by |
0b19decd4e28ad7ce7fbc924f2cd55619c3ede35 | 2140f3476db805c4a0f437da903b3b71799f5c95 | /hack_assembler/h_parser.py | a6c0a218177e313053135a9717fd6caa57941f2c | [] | no_license | andren/fhl_hackathon_2020_hack_assembler | fd5333df91064cc260dd1a5388a7f376eada243e | 607ddf1191208de9d0d3b80fa6fafc9afb0a6b5c | refs/heads/master | 2023-02-02T06:16:42.998788 | 2020-11-06T13:05:17 | 2020-11-06T13:05:17 | 310,599,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,533 | py | import os
from pathlib import Path
import re
from enum import Enum
#from hack_assembler.hack_command_factory import hack_command_factory, HackCommandType, HackCommand
import h_command_factory as h_fac
class Parser:
    """Breaks each assembly command into its underlying components.
    Encapsulates access to the input code. Reads an assembly language command, parses it, and
    provides convenient access to the command’s components (fields and symbols). In addition,
    removes all white space and comments.
    Note: each line is a command.
    """
    # --- Properties ---
    commandList: [h_fac.HackCommand]
    currentCommand: h_fac.HackCommand
    # --- Methods ---
    # TODO 'in-line A-inst': implement support for "D=A [2]" format. Currently only "@2","D=A" format is supported.
    def __init__(self, asm_lines: [str]):
        """Clean up the raw assembly lines and build the command list.

        Strips //-comments and all whitespace from each line, drops the
        resulting empty lines, and converts the remainder into HackCommand
        objects via the command factory.
        :param asm_lines: raw lines read from a .asm source file
        """
        # remove comments and whitespace
        r_asmCommentsAndWhitespace = r"([^\n\S]*\/\/[^\n]*)|([\s]*)"
        for i in range(len(asm_lines)):
            asm_lines[i] = re.sub(r_asmCommentsAndWhitespace, '', asm_lines[i]) # ASM Comments and whitespace are replaced by nothing
            asm_lines[i] = re.sub('\n', '', asm_lines[i]) # \n is left over and replaced by nothing
        # remove empty elements from asm_lines
        asm_lines = list(filter(None, asm_lines))
        # use factory to translate a list of strings with potential Assembly commands to a list of Hack Assembly commands
        self.commandList = []
        for line in asm_lines:
            self.commandList.append(h_fac.hack_command_factory(line))
    def hasMoreCommands(self) -> bool:
        """Are there more commands in the input?
        """
        if not self.commandList:
            return False
        else:
            return True
    def advance(self):
        """Reads the next command from the input and makes it the current command.
        Should be called only if hasMoreCommands() is true. Initially there is no current command.
        """
        self.currentCommand = self.commandList.pop(0)
    def commandType(self) -> h_fac.HackCommandType:
        """Returns the type of the current command.
        A_COMMAND for @Xxx where Xxx is either a symbol or a decimal number.
        C_COMMAND for dest=comp;jump.
        L_COMMAND for (Xxx) where Xxx is a symbol (this is actually a pseudo-command).
        """
        return self.currentCommand.cmdType
    def symbol(self) -> str:
        """Returns the symbol or decimal Xxx of the current command @Xxx or (Xxx).
        Should be called only when commandType() is A_COMMAND or L_COMMAND.
        NOTE(review): despite the ``-> str`` annotation this currently returns
        an int for numeric literals; symbolic names raise NotImplementedError.
        """
        if self.currentCommand.cmdType == h_fac.HackCommandType.C_COMMAND:
            raise ValueError("Invalid current command type:", self.currentCommand.cmdType)
        try:
            return int(self.currentCommand.symbol)
        except ValueError:
            raise NotImplementedError("Symbol Table not yet implemented, only asm literals supported for now")
    def dest(self) -> str:
        """Returns the dest mnemonic in the current C_COMMAND (8 possibilities).
        Should be called only when commandType() is C_COMMAND.
        """
        if self.currentCommand.cmdType != h_fac.HackCommandType.C_COMMAND:
            raise ValueError("Invalid current command type:", self.currentCommand.cmdType)
        return self.currentCommand.dest
    def comp(self) -> str:
        """Returns the comp mnemonic in the current C_COMMAND (28 possibilities).
        Should be called only when commandType() is C_COMMAND.
        """
        if self.currentCommand.cmdType != h_fac.HackCommandType.C_COMMAND:
            raise ValueError("Invalid current command type:", self.currentCommand.cmdType)
        return self.currentCommand.comp
    def jump(self) -> str:
        """Returns the jump mnemonic in the current C_COMMAND (8 possibilities).
        Should be called only when commandType() is C_COMMAND.
        """
        if self.currentCommand.cmdType != h_fac.HackCommandType.C_COMMAND:
            raise ValueError("Invalid current command type:", self.currentCommand.cmdType)
        return self.currentCommand.jump
| [
"40470926+andren@users.noreply.github.com"
] | 40470926+andren@users.noreply.github.com |
6b8f766ca5c36c08d991f16d786e1e5eb5988a2f | 24c7a7a8ec30708ef5151a78d6b8b2fea4a89169 | /backend/accounts/views.py | 621ae2c5294f8632f584d73227e0123a97dd620f | [] | no_license | CatalinAtanase/VeganApp | bb90a0e1c2e848afca6963db3953acfebcec14e5 | 2997bab7ff80c07061e8bd58500f6e3693c90f9c | refs/heads/main | 2023-02-12T02:57:35.319911 | 2021-01-07T18:44:40 | 2021-01-07T18:44:40 | 305,431,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,167 | py | from django.conf import settings
import jwt
from rest_framework.generics import CreateAPIView, UpdateAPIView
from accounts.utils import generate_access_token, set_refresh_token, generate_refresh_token
from django.shortcuts import render
from rest_framework import viewsets, exceptions, status
from rest_framework.response import Response
from .serializers import ChangePasswordSerializer, UserSerializer
from .models import User
from django.contrib.auth import get_user_model
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.csrf import requires_csrf_token
class UserViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for User objects."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
@api_view(['POST'])
@permission_classes([AllowAny])
@authentication_classes([])
@ensure_csrf_cookie
def login(request):
    """Authenticate by email/password and issue access + refresh tokens.

    Bumps the user's token_version so previously issued refresh tokens stop
    validating. The refresh token is set as a cookie; in DEBUG it is also
    echoed in the response body.
    """
    User = get_user_model()
    email = request.data.get('email')
    password = request.data.get('password')
    response = Response()
    if (email is None) or (password is None):
        raise exceptions.AuthenticationFailed(
            'email and password required')
    user = User.objects.filter(email=email).first()
    # Same message for unknown user and wrong password (presumably to avoid
    # leaking which part failed).
    if(user is None):
        raise exceptions.AuthenticationFailed('Wrong username/password') #Change to invalid credentials
    if (not user.check_password(password)):
        raise exceptions.AuthenticationFailed('Wrong username/password')
    serialized_user = UserSerializer(user).data
    # Invalidate every refresh token issued before this login (refresh_token
    # below compares payload token_version against this value).
    user.token_version = user.token_version + 1
    user.save()
    access_token, access_token_lifetime = generate_access_token(user)
    response.data = {
        'access_token': access_token,
        'user': serialized_user,
        'access_token_lifetime': access_token_lifetime
    }
    if settings.DEBUG:
        set_refresh_token(user, user.token_version, response)
        refresh_token = generate_refresh_token(user, user.token_version)
        response.data['refresh_token'] = refresh_token
    else:
        set_refresh_token(user, user.token_version, response)
    return response
@api_view(['POST'])
@permission_classes([AllowAny])
@authentication_classes([])
@ensure_csrf_cookie
@requires_csrf_token
def refresh_token(request):
    '''
    To obtain a new access_token this view expects 2 important things:
    1. a cookie that contains a valid refresh_token
    2. a header 'X-CSRFTOKEN' with a valid csrf token, client app can get it from cookies "csrftoken"
    '''
    User = get_user_model()
    refresh_token = request.COOKIES.get('refresh_token')
    response = Response()
    if refresh_token is None:
        raise exceptions.AuthenticationFailed(
            'Authentication credentials were not provided. Cookie missing')
    try:
        payload = jwt.decode(
            refresh_token, settings.REFRESH_TOKEN_SECRET, algorithms=['HS256'])
    except jwt.ExpiredSignatureError:
        raise exceptions.AuthenticationFailed(
            'expired refresh token, please login again.')
    except jwt.DecodeError:
        raise exceptions.AuthenticationFailed(
            'Invalid token.')
    user = User.objects.filter(id=payload.get('user_id')).first()
    if user is None:
        raise exceptions.AuthenticationFailed('User not found')
    if not user.is_active:
        raise exceptions.AuthenticationFailed('Inactive account')
    # Server-side revocation: tokens minted before the last login (which
    # bumps user.token_version) are rejected here.
    if payload['token_version'] != user.token_version:
        raise exceptions.AuthenticationFailed('Invalid Token')
    access_token, access_token_lifetime = generate_access_token(user)
    response.data = {
        'access_token': access_token,
        'user': UserSerializer(user).data,
        'access_token_lifetime': access_token_lifetime,
    }
    # In DEBUG the refresh token is also returned in the body for local testing.
    if settings.DEBUG:
        set_refresh_token(user, user.token_version, response)
        refresh_token = generate_refresh_token(user, user.token_version)
        response.data['refresh_token'] = refresh_token
    else:
        set_refresh_token(user, user.token_version, response)
    return response
class ChangePasswordView(UpdateAPIView):
    """Update endpoint that validates and saves a new password via its serializer."""
    serializer_class = ChangePasswordSerializer
    def update(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        response = {
            'status': 'success',
            'code': status.HTTP_200_OK,
            'message': 'Password updated successfully',
            'data': []
        }
        # return new token
        return Response(response, status=status.HTTP_200_OK)
class CreateUserView(CreateAPIView):
    """Open registration endpoint: anonymous users may POST a new account."""
    model = get_user_model()
    permission_classes = [
        AllowAny # Or anon users can't register
    ]
    authentication_classes = []
    serializer_class = UserSerializer
@api_view(['POST'])
@requires_csrf_token
@ensure_csrf_cookie
def test(request):
    """Return the serialized requesting user (exercises CSRF + auth wiring)."""
    response = Response()
    serialized_user = UserSerializer(request.user).data
    response.data = {
        'user': serialized_user
    }
    return response
| [
"atanasecatalin20@gmail.com"
] | atanasecatalin20@gmail.com |
ca2ebc824317a711c5118a6cd2c5ee8dd9062f25 | c130290d880fa15c8a6e8e7e0c33e40e4e91aa45 | /forwarder/main/urls.py | 01d4b5bdfd0e4463c7dbccc4045fe2d3bc6670b1 | [] | no_license | agamova/url-forwarder | df6eac0a1517ef8b35ae84762bac0006e374f28c | a557056affe798c2c7f3c8a5f944e94657d6dabf | refs/heads/main | 2023-06-22T04:18:54.875562 | 2021-07-11T12:41:38 | 2021-07-11T12:41:38 | 383,585,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from django.urls import path
from . import views
# Route /<slug>/ to views.forwarder; the index route is kept commented out
# for reference.
urlpatterns = [
    #path('', views.index, name='index'),
    path('<slug:slug>/', views.forwarder, name='forwarder'),
]
"evgenia@agamova.ru"
] | evgenia@agamova.ru |
18d0046b2f0225fe6b04fe89ec0db7e296019405 | bfea8126df0a258714089634d26acec0b9e49e0d | /apipipeline/__init__.py | ab5a556b16109aa3dc68dc7054bb83edfcaeeba0 | [] | no_license | general-programming/tumblrpipeline | bc5860aaa38a56eda1d8efac623787b04a59ce2c | 89d7404eda7d6c1e712eb113b0bfbf6ed6a99950 | refs/heads/master | 2020-04-11T16:53:15.118953 | 2018-12-18T09:39:41 | 2018-12-18T09:42:01 | 161,940,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import os
import logging
# Initialize Sentry before importing the rest of the app.
sentry_sdk = None
if "SENTRY_DSN" in os.environ:
import sentry_sdk
sentry_sdk.init(
dsn=os.environ["SENTRY_DSN"],
)
if "DEBUG" in os.environ:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger("asyncio").setLevel(logging.DEBUG) | [
"support@generalprogramming.org"
] | support@generalprogramming.org |
8fa999908e6e1e5ccdf32537ba7b2911586b09ce | e16fb30d185ed9c8df379ac78c401ae82aec726c | /A1Part3.py | 897be1af4eb6b8027ceafd33c465157c96cd0cd8 | [] | no_license | kasunator/testRepo | 6b065df4882b46cda43ba7cca9bf45838802f5a6 | e38ce37d1d64cf2972bf4420a726c1d42da164b7 | refs/heads/master | 2021-01-12T04:48:39.089006 | 2017-01-01T22:25:30 | 2017-01-01T22:25:30 | 77,797,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | """
A1-Part-3: Python array indexing
Write a function that given a numpy array x, returns every Mth element in x, starting from the
first element.
The input arguments to this function are a numpy array x and a positive integer M such that M < number of
elements in x. The output of this function should be a numpy array.
If you run your code with x = np.arange(10) and M = 2, the function should return the following output:
array([0, 2, 4, 6, 8]).
"""
def hopSamples(x,M):
"""
Inputs:
x: input numpy array
M: hop size (positive integer)
Output:
A numpy array containing every Mth element in x, starting from the first element in x.
"""
## Your code here
b=x[0::M]
return b
| [
"bitcraze@bitcraze-vm.unassigned-domain"
] | bitcraze@bitcraze-vm.unassigned-domain |
8f9f14c6e253a488eaf68d83a462898ba45b140a | 921503111718f11ef04a3873cd527406938ee4a9 | /7-Histogram.py | 851222ad845d7a651b9dc4e42081eaa097597cba | [] | no_license | cnntglylmz/coronavirus-data-analysis | 211a1f192576132e5e6523634f01269d4ff81230 | 83814d68ee64ee6df6e518534a49168b6d4b3e17 | refs/heads/main | 2023-09-03T05:56:39.026260 | 2021-10-07T10:41:40 | 2021-10-07T10:41:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 21:28:31 2021
@author: Cennetgül Yılmaz
"""
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv(r"C:\Users\asus\Desktop\Yapay_Zeka_Proje\coronavirus_dataset\covid_19_data.csv")
turkey=df[df["Country/Region"]=="Turkey"]
italy=df[df["Country/Region"]=="Italy"]
spain=df[df["Country/Region"]=="Spain"]
plt.hist(turkey.Deaths,bins=10)
plt.xlabel("Ölüm sayıları")
plt.ylabel("Değer aralığı")
plt.title("Türkiye Coronavirüs Analizi")
plt.show()
"""
plt.hist(italy.Deaths,bins=10)
plt.xlabel("Ölüm sayıları")
plt.ylabel("Değer aralığı")
plt.title("İtalya Coronavirüs Analizi")
plt.show()
plt.hist(spain.Deaths,bins=10)
plt.xlabel("Ölüm sayıları")
plt.ylabel("Değer aralığı")
plt.title("İspanya Coronavirüs Analizi")
plt.show()
""" | [
"cnntglylmz@gmail.com"
] | cnntglylmz@gmail.com |
64974390c8d0616bfe589e88a1236eee0b20d4c5 | 4983dd3aa6c29a4ce5ef3a0f04ff742d0c18ed78 | /dash/migrations/0005_lessonprogress.py | 02aafdd646d57e82c0efb708de16920d37ab1d9e | [] | no_license | adonnelly759/codecontroller | c3eb3cba2a7062b638085fad457041df88f706a1 | 70fd20389eb5c4adb53ae954a23eb1f70e79cd6e | refs/heads/master | 2022-12-13T06:01:14.128477 | 2020-03-08T23:28:54 | 2020-03-08T23:28:54 | 244,388,524 | 0 | 0 | null | 2022-11-23T07:46:13 | 2020-03-02T14:16:11 | CSS | UTF-8 | Python | false | false | 1,119 | py | # Generated by Django 3.0 on 2019-12-21 23:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dash', '0004_auto_20191221_2238'),
]
operations = [
migrations.CreateModel(
name='LessonProgress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('completed', models.BooleanField(default=False)),
('code', models.TextField()),
('start', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('finish', models.DateTimeField()),
('lesson', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dash.Lesson')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"adonnelly759@qub.ac.uk"
] | adonnelly759@qub.ac.uk |
ad6c2adee224ae22367d1f19f5ca3eb32d8cc05c | 3ede0bb9bb0d8ea31a16c852e13773601fffd739 | /common/__init__.py | 577ff06f5887b154a5ad83977f1154e579d5ed87 | [
"MIT"
] | permissive | fieri/imgdupes | 39918cfb424cd54f1eb6b94dc2f378114ee78666 | a70a4edc6d3d3cb15376c2b68bbb13ca36c24888 | refs/heads/master | 2020-06-16T14:07:56.758356 | 2019-06-20T14:51:05 | 2019-06-20T14:51:05 | 195,603,190 | 0 | 1 | null | 2019-07-07T02:52:29 | 2019-07-07T02:52:29 | null | UTF-8 | Python | false | false | 18 | py | name = "imgdupes"
| [
"knjcode@gmail.com"
] | knjcode@gmail.com |
7530f15912d33b1d01108d59225addf86e9e09cf | d3d109ee9afb4b785004becff5964c6b80da1745 | /app/models.py | 08b0733783cca73493d99974595a6123e263ce8e | [] | no_license | matbarPL/Distributed-application | 56097c6a337daf12ea8c9928ff2b55d115c5e073 | 68709d25b391f92f53990c2a6d6e5625ca182cb2 | refs/heads/master | 2023-01-23T09:11:01.897677 | 2020-05-31T00:52:23 | 2020-05-31T00:52:23 | 245,972,833 | 0 | 0 | null | 2023-01-06T04:32:38 | 2020-03-09T07:45:12 | Python | UTF-8 | Python | false | false | 1,073 | py | from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime, timedelta
import os
from flask_jwt_extended import (create_access_token)
from flask import g
from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(120))
last_name = db.Column(db.String(120))
email = db.Column(db.String(120))
password_hash = db.Column(db.String(64))
created = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def to_dict(self):
return {'id': self.id,
'first_name': self.first_name,
'last_name': self.last_name,
'email': self.email}
def __repr__(self):
return '<User first name {} User last name {} Password {}>'.format(self.first_name, self.last_name,self.password_hash) | [
"mateusz.baryla.1996@gmail.com"
] | mateusz.baryla.1996@gmail.com |
f6b845b799f3e15e52f10efd5b2ba60a4d5e1fb8 | da687718aa8ce62974090af63d25e057262e9dfe | /cap12-dicionarios/10_fromkeys_method.py | 59b1594ed08737b3f91bb025905c1d9639f0eab5 | [] | no_license | frclasso/revisao_Python_modulo1 | 77928fa4409c97d49cc7deccdf291f44c337d290 | 1e83d0ef9657440db46a8e84b136ac5f9a7c556e | refs/heads/master | 2020-06-25T05:37:28.768343 | 2019-07-27T22:23:58 | 2019-07-27T22:23:58 | 199,217,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | #!/usr/bin/env python3
"""retorna um novo dicionário cujas chaves são os elementos de uma sequencia e cujos
valores são todos iguais ao argumento valor.
Sintaxe: dict.fromkeys(seq[, value])
"""
seq = ['name', 'age', 'sex']
dict = dict.fromkeys(seq)
print('Novo dicionario: {}'.format(str(dict))) # nenhum valor foi definido para 'value'
# definido o valor 10 para o argumento value
# dict = dict.fromkeys(seq, 10)
# print('Novo dicionario: {}'.format(str(dict)))
| [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
f84617d4b0d250050638a579c3f99cc59697e6ce | 4b6a0ac0ab52ae1f2819f7a4368e34db5383c9fd | /api/utils/explainer.py | 58cad004d3277ede180742b585084554a589b269 | [] | no_license | jinwoo1990/ml-streamlit-app | 410e56d166a1ab18c94e0f40ec18782722675779 | b9d428e7225fb9219fab2d644fd34c6cb0484db5 | refs/heads/master | 2023-07-26T00:07:41.619254 | 2021-09-08T17:54:21 | 2021-09-08T17:54:21 | 398,511,805 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import shap
from loggers import logger
def generate_shap_explainer(fitted_model):
"""
shap 값을 계산하기 위한 explainer 와 train data 의 shap 값을 반환하는 함수
:param fitted_model: 학습된 모델 객체
:return: shap explainer, train 데이터 shap values
"""
logger.info('## Generate shap explainer')
explainer = shap.TreeExplainer(fitted_model)
return explainer
| [
"jinwoohong77@gmail.com"
] | jinwoohong77@gmail.com |
04f201d9ffc95af7193be3b65f3977c721df4bcc | c61872b4d9fd6f7caf4fbbbcb914405b75b30866 | /venv/lib/python3.6/site-packages/zope/component/tests/test_registry.py | 39f9f3c5580a05e31c7928ed4f085e322cddcf46 | [
"MIT"
] | permissive | ThomasGarm/portfolio-cv | f7140f618ef4b4bd680186ed1a5f1157fcb7fbe7 | 2d3abb51c9dc991a05939255f39c339ad152b51c | refs/heads/master | 2022-12-08T03:00:25.075362 | 2020-04-30T14:56:11 | 2020-04-30T14:56:11 | 244,865,394 | 1 | 0 | MIT | 2022-12-05T11:40:16 | 2020-03-04T09:58:53 | Python | UTF-8 | Python | false | false | 5,172 | py | ##############################################################################
#
# Copyright (c) 2012 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests for z.c.registry
"""
import unittest
from zope.component.tests import fails_if_called
class Test_dispatchUtilityRegistrationEvent(unittest.TestCase):
from zope.component.testing import setUp, tearDown
def _callFUT(self, *args, **kw):
from zope.component.registry import dispatchUtilityRegistrationEvent
return dispatchUtilityRegistrationEvent(*args, **kw)
def test_it(self):
from zope.component import registry
class _Registration(object):
component = object()
_EVENT = object()
_handled = []
def _handle(*args):
_handled.append(args)
with _Monkey(registry, handle=_handle):
self._callFUT(_Registration(), _EVENT)
self.assertEqual(_handled, [(_Registration.component, _EVENT)])
class Test_dispatchAdapterRegistrationEvent(unittest.TestCase):
from zope.component.testing import setUp, tearDown
def _callFUT(self, *args, **kw):
from zope.component.registry import dispatchAdapterRegistrationEvent
return dispatchAdapterRegistrationEvent(*args, **kw)
def test_it(self):
from zope.component import registry
class _Registration(object):
factory = fails_if_called(self)
_registration = _Registration()
_EVENT = object()
_handled = []
def _handle(*args):
_handled.append(args)
with _Monkey(registry, handle=_handle):
self._callFUT(_registration, _EVENT)
self.assertEqual(_handled, [(_registration.factory, _EVENT)])
class Test_dispatchSubscriptionAdapterRegistrationEvent(unittest.TestCase):
from zope.component.testing import setUp, tearDown
def _callFUT(self, *args, **kw):
from zope.component.registry \
import dispatchSubscriptionAdapterRegistrationEvent
return dispatchSubscriptionAdapterRegistrationEvent(*args, **kw)
def test_it(self):
from zope.component import registry
class _Registration(object):
factory = fails_if_called(self)
_registration = _Registration()
_EVENT = object()
_handled = []
def _handle(*args):
_handled.append(args)
with _Monkey(registry, handle=_handle):
self._callFUT(_registration, _EVENT)
self.assertEqual(_handled, [(_registration.factory, _EVENT)])
class Test_dispatchHandlerRegistrationEvent(unittest.TestCase):
from zope.component.testing import setUp, tearDown
def _callFUT(self, *args, **kw):
from zope.component.registry import dispatchHandlerRegistrationEvent
return dispatchHandlerRegistrationEvent(*args, **kw)
def test_it(self):
from zope.component import registry
class _Registration(object):
handler = fails_if_called(self)
_registration = _Registration()
_EVENT = object()
_handled = []
def _handle(*args):
_handled.append(args)
with _Monkey(registry, handle=_handle):
self._callFUT(_registration, _EVENT)
self.assertEqual(_handled, [(_registration.handler, _EVENT)])
class TestBackwardsCompat(unittest.TestCase):
def test_interface_warnings(self):
from zope.component import registry
import warnings
for name in (
'Components',
'_getUtilityProvided',
'_getAdapterProvided',
'_getAdapterRequired',
'UtilityRegistration',
'AdapterRegistration',
'SubscriptionRegistration',
'HandlerRegistration',
):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
getattr(registry, name)
self.assertEqual(1, len(log), name)
message = str(log[0].message)
self.assertIn(name, message)
self.assertIn("Import from zope.interface.registry", message)
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
def __init__(self, module, **kw):
self.module = module
self.to_restore = dict([(key, getattr(module, key)) for key in kw])
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
| [
"thomas.win@gmail.com"
] | thomas.win@gmail.com |
46118d4edf67b4b6ba7d31735eda41de9fca5bbd | 3ee8c47a2115db04c53cbe900cd7318273ce91e6 | /src/client/TCP_test.py | fa13c15582cece9d08571703726293b235a2239c | [] | no_license | kmintae/CITE | e805ea90147ce7bf7110f1aad2a2de78eff1fbfb | ce09634a7aca3148e8cf62fb32abb898e24a3e6d | refs/heads/master | 2022-10-08T21:29:15.910006 | 2020-06-06T18:57:00 | 2020-06-06T18:57:00 | 269,110,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,497 | py | from socket import *
# from C_RobotArm.RobotArmClient import armClient
import yaml
import time
# from C_Motor.Car import Car
# Configurations
config = yaml.load(open("config/client.yaml", 'r'), Loader=yaml.FullLoader)
MAX_BUF_SIZE = 1024
def run_client():
# numInst: Calculating # of Instructions
numInst = 0
clientSock = socket(AF_INET, SOCK_STREAM)
clientSock.connect((config["SERVER_IP_ADDR"], config["SERVER_PORT"]))
# New Car Module
# car = Car()
# robotArm = armClient(config["GPIO_ARM_DIRPINS"], config["GPIO_ARM_STPPINS"], config["ROBOTARM_MIN_ANGLES"],
# config["ROBOTARM_MAX_ANGLES"], config["GPIO_SERVO_PIN"])
# Client Flow 1 : Send Robot_Arm Number & Color Data
sendInfo = str(config["ROBOT_ARM_NUM"]) + " " + str(config["ROBOT_ARM_COLOR"][0]) + " "
sendInfo += str(config["ROBOT_ARM_COLOR"][1]) + " " + str(config["ROBOT_ARM_COLOR"][2]) + " "
sendInfo += str(config["INIT_POS"][0]) + " " + str(config["INIT_POS"][1])
sendInfo = sendInfo.encode()
clientSock.sendall(sendInfo)
# Client Flow 2 : Iteration with While Loop, Executing action for robot arm instructions
while (True):
recvInst = (clientSock.recv(MAX_BUF_SIZE).decode()).split(' ')
if (recvInst[0] == 'RRT'): # Right Rotation
print("#" + numInst + ": RRT " + recvInst[1])
# TODO: car.rotate(float(recv_inst_tok[1]))
elif (recvInst[0] == 'FWD'): # Forward
print("#" + numInst + ": FWD " + recvInst[1])
# TODO: car.move_forward(float(recv_inst_tok[1]))
elif (recvInst[0] == 'ARM'): # Arm Movement
print("#" + numInst + ": ARM " + recvInst[1] + " " + recvInst[2] + " " + recvInst[3] + " " + recvInst[4])
# TODO: ARM arg[1] arg[2] arg[3] state
state = int(recvInst[4])
# TODO : Implement
if (state == 0): # GRAB
# robotArm.work([recv_inst_tok[1], recv_inst_tok[2], recv_inst_tok[3]], True)
pass
else: # Release
pass
elif (recvInst[0] == 'HLT'): # Halt
print("#" + numInst + ": HLT")
elif (recvInst[0] == 'DCN'): # Disconnect
print("#" + numInst + ": DCN")
break
# TODO: Remove (Test Purpose Only)
time.sleep(2)
# Noticing Current Task is totally done.
clientSock.sendall("DONE".encode())
numInst = numInst + 1
clientSock.close() | [
"shsesther928@postech.ac.kr"
] | shsesther928@postech.ac.kr |
7767947cdfb29b1f5344a3ea65c042a25690e0cd | fb141b63e8d3e52e364b9eadc0dbadb42f6a5266 | /prepare_data.py | 6598c365f00831b02dd522ac1420fa8ef96870c7 | [
"MIT"
] | permissive | alexeyev/glyphnet-pytorch | c51c338ab535f4d8f2d2795efcd2c06f557283e6 | 7fd229dc780779a72731085b3a77de38da3cb552 | refs/heads/main | 2023-05-22T13:43:19.409291 | 2022-09-06T06:57:25 | 2022-09-06T06:57:25 | 431,622,962 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,757 | py | # coding: utf-8
"""
Building the dataset trying to follow `glyphreader` repository
as closely as possible; dataset is constructed and split using Pool there,
so the exactly the same split as in `glyphreader` does not seem
to be reproducible; hence we do our own splitting.
"""
import hashlib
import shutil
import logging
from collections import Counter
from os import listdir, makedirs
from os.path import isdir, isfile, join, dirname
import numpy as np
from sklearn.model_selection import train_test_split
UNKNOWN_LABEL = "UNKNOWN"
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument("--data_path", default="/".join(("data", "Dataset", "Manual", "Preprocessed")))
ap.add_argument("--prepared_data_path", default="prepared_data")
ap.add_argument("--test_fraction", type=float, default=0.2)
ap.add_argument("--seed", type=int, default=261)
arguments = ap.parse_args()
# prepare yourself for some hardcode
root = logging.getLogger()
root.setLevel(logging.DEBUG)
file_dir = dirname(__file__)
stele_path = join(file_dir, arguments.data_path)
steles = [join(stele_path, f) for f in listdir(stele_path) if isdir(join(stele_path, f))]
res_image_paths, labels = [], []
for stele in steles:
image_paths = [join(stele, f) for f in listdir(stele) if isfile(join(stele, f))]
for path in image_paths:
res_image_paths.append(path)
labels.append(path[(path.rfind("_") + 1): path.rfind(".")])
list_of_paths = np.asarray(res_image_paths)
labels = np.array(labels)
logging.debug(f"Labels total: {len(set(labels))}")
labels_just_once = np.array([l for (l, c) in Counter(labels).items() if c <= 1])
logging.debug(f"Labels seen just once: {len(labels_just_once)}")
# those hieroglyphs that were seen in data only once, go to TRAIN set
to_be_added_to_train_only = np.nonzero(np.isin(labels, labels_just_once))[0]
# the hieroglyphs that have NO label are to be removed
to_be_deleted = np.nonzero(labels == UNKNOWN_LABEL)[0]
# we remove all elements of these two sets
to_be_deleted = np.concatenate([to_be_deleted, to_be_added_to_train_only])
filtered_list_of_paths = np.delete(list_of_paths, to_be_deleted, 0)
filtered_labels = np.delete(labels, to_be_deleted, 0)
# we split the data
train_paths, test_paths, y_train, y_test = train_test_split(filtered_list_of_paths,
filtered_labels,
stratify=filtered_labels,
test_size=arguments.test_fraction,
random_state=arguments.seed)
# we add the 'single-occurence' folks to the train set
train_paths = np.concatenate([train_paths, list_of_paths[to_be_added_to_train_only]])
y_train = np.concatenate([y_train, labels[to_be_added_to_train_only]])
# then we copy all
makedirs(arguments.prepared_data_path, exist_ok=True)
[makedirs(join(arguments.prepared_data_path, "train", + l), exist_ok=True) for l in set(y_train)]
[makedirs(join(arguments.prepared_data_path, "test", + l), exist_ok=True) for l in set(y_test)]
for fp, label in zip(train_paths, y_train):
fn = join(arguments.prepared_data_path, "train", label, hashlib.md5(fp.encode('utf-8')).hexdigest() + ".png")
shutil.copyfile(fp, fn)
for fp, label in zip(test_paths, y_test):
fn = join(arguments.prepared_data_path, "test", label, hashlib.md5(fp.encode('utf-8')).hexdigest() + ".png")
shutil.copyfile(fp, fn)
| [
"anton.m.alexeyev@gmail.com"
] | anton.m.alexeyev@gmail.com |
eb01f51a86372e61aefb726d9f46b6e7df7b77a1 | d9cf731cf37a3dc056ec5e45f862ac75a6cf6ba1 | /petSearch/apps.py | 3f8155c5114550723e7c305903a750b1ed42bb2f | [] | no_license | zhizhengyang/pet | ac6c499146f3b320cd25c489acf9849eb280517b | 12bea10ea399cdce8c3d58e13e2a426739838a68 | refs/heads/master | 2020-04-06T18:04:42.694866 | 2018-11-21T10:09:57 | 2018-11-21T10:09:57 | 157,684,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class PetsearchConfig(AppConfig):
name = 'petSearch'
| [
"413064943@qq.com"
] | 413064943@qq.com |
b2f6ba810f56fe21e915805b75b08d7c0443d9fc | 8fb7a7b4fb09ce457ad413d19191235cf4805851 | /notes code/detection of fail/object_only/scan_mark1/find_thing_on_print_bed.py | 1a6610a59ee5df617f1a2396d94b2e6a3a5120ce | [] | no_license | clambering-goat/honner-progect | df8ab2e22c223cf0f8cb59b93b132eea3d9030f2 | ea996ea34ac13867dea6d4935f9760c6915b206f | refs/heads/master | 2020-04-15T19:32:57.303438 | 2019-05-13T17:51:56 | 2019-05-13T17:51:56 | 164,954,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py |
import numpy as np
import cv2
data=np.load("data.npy")
iamge = data.astype(np.uint8)
for Ycount,y in enumerate(iamge):
for x_count,x in enumerate(y):
if iamge[Ycount][x_count]==255:
iamge[Ycount][x_count] =0
cv2.imshow("frame",iamge)
cv2.waitKey(20000)
| [
"camerondrain@gmail.com"
] | camerondrain@gmail.com |
9179210109a8fa035ce59bb29a6582ddd74d25fd | d9d9a203a27bd28fe9afc72ecc613b186b33d673 | /06_MultipleForm/mainform.py | fe80a611cdf948f4f13e439c5959ffe08d143681 | [] | no_license | wildenali/Belajar-GUI-dengan-pyQT | 378951fcf0e172f48bf71ec46d887599cf5e09ed | 06ebbcbf57bec8a6a63fbb6d5397a7e2ab7c9ef9 | refs/heads/master | 2020-04-06T10:51:58.582049 | 2018-12-31T10:37:56 | 2018-12-31T10:37:56 | 157,395,034 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from PyQt5.QtWidgets import QWidget, QPushButton
from otherform import *
class MainForm(QWidget):
def __init__(self):
super(MainForm, self).__init__()
self.setupUI()
def setupUI(self):
self.resize(300, 500)
self.move(400, 200)
self.setWindowTitle('Form nya ada DUA')
self.button = QPushButton('Muncuuuul kan')
self.button.move(50,50)
self.button.setParent(self)
self.button.clicked.connect(self.buttonClick)
def buttonClick(self):
self.form = OtherForm()
self.form.show()
| [
"wildeeeeen@gmail.com"
] | wildeeeeen@gmail.com |
2181b9b489be977497f5bf973402ba2f939eb01b | 9d0359f43168640175eecd00d045aed4b82b9271 | /DjangoWebProject1/myApp/migrations/0004_auto_20191209_1423.py | e82ed9ccf71ddea4c76be4b96ea1312297a1f4fa | [] | no_license | elya-bar-elya-zhoq/project | 35e036f29bf0639bc8ca35886ebd27cfeb4480cd | bab8696fab9f9883a0b47e23f5e7c6616c9ab5c6 | refs/heads/master | 2020-09-28T07:16:10.301237 | 2019-12-09T12:09:23 | 2019-12-09T12:09:23 | 226,721,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # Generated by Django 3.0 on 2019-12-09 08:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myApp', '0003_auto_20191209_1412'),
]
operations = [
migrations.AlterField(
model_name='suggestion',
name='suggestion_author',
field=models.CharField(max_length=12, verbose_name='Автор предложения'),
),
]
| [
"noreply@github.com"
] | elya-bar-elya-zhoq.noreply@github.com |
28598f36e66c74da10b429d228ad8e96cb136f00 | aaf9df2f15ec9bbfb7d98c2239db940117bc6762 | /Algorithmic-Toolbox/covering_segments/covering_segments.py | e33a1707cc450ddd489122ddb82f546ec7713987 | [
"MIT"
] | permissive | ugaliguy/Data-Structures-and-Algorithms | db50a0f4b39908d17fa125ca70c0616f52d895d2 | 4bcbd1b0cff66f442a03d06393f654f8e3a61ded | refs/heads/master | 2021-01-21T14:08:42.127708 | 2016-07-04T00:43:38 | 2016-07-04T00:43:38 | 56,821,728 | 0 | 1 | null | 2016-07-04T00:43:39 | 2016-04-22T02:54:23 | Python | UTF-8 | Python | false | false | 854 | py | # Uses python3
import sys
from collections import namedtuple
from operator import attrgetter
Segment = namedtuple('Segment', 'start end')
def optimal_points(segments):
start_sort = sorted(segments, key=attrgetter('start'))
end_sort = sorted(segments, key=attrgetter('end'))
points = []
#write your code here
minimum = start_sort[0].start - 1
for i in range(len(segments)):
begin = end_sort[i].start
end = end_sort[i].end
if begin > minimum:
points.append(end)
minimum = end
return points
if __name__ == '__main__':
input = sys.stdin.read()
n, *data = map(int, input.split())
segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[1::2])))
points = optimal_points(segments)
print(len(points))
for p in points:
print(p, end=' ')
| [
"ugali_guy@hotmail.com"
] | ugali_guy@hotmail.com |
4370545f8a75330aec51c5b699aada3f8df69d5c | 4e4c22dfabb1a0fa89f0f51f58737273412a30e0 | /fort_machine/wsgi.py | 640e612427bbf2c0356ea849505b08617eed3925 | [] | no_license | shaoqianliang/fort_machine | 4cb271d5ef29c924c09172ff397e2af8562ee4ba | cf7e3d4c6682831ce04bcde478930ab7e85abb01 | refs/heads/master | 2020-04-28T15:24:02.056674 | 2019-04-12T23:50:35 | 2019-04-12T23:50:35 | 175,372,042 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for fort_machine project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fort_machine.settings")
application = get_wsgi_application()
| [
"1132424753@qq.com"
] | 1132424753@qq.com |
7a0d2bbea23f8c4ce8b1a8b641c3d88573cb63c6 | c05211611879ddce33152790488e429439676a89 | /sensehat color experiments.py | ece2fb8565108a5bd3a55b98399bf99a8ffaf8fd | [] | no_license | colinkillmer/daughter | 035ff8b6ee709b34d53b009712f01ffdac147c15 | 8dbe3f67747042401f675130622e9e89d79cab0d | refs/heads/master | 2020-05-07T00:56:57.824054 | 2019-04-09T00:13:22 | 2019-04-09T00:13:22 | 180,252,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
sense.set_rotation(180)
sense.clear((100,100,100))
sleep(1)
sense.show_letter("K", text_colour=[255,255,255], back_colour=[20,20,20])
sleep(5)
sense.clear()
| [
"noreply@github.com"
] | colinkillmer.noreply@github.com |
fe7d0f99e3ae6e1f339a1cd8e4642a724e9016f7 | 1b1e8e73649ad1eed89556a5d479b0a549354fd5 | /opennem/db/migrations/versions/4bf86ff5c8ff_update_indicies_that_aren_t_used.py | a7a46b84637e88e727f1fe594938c21feb0ebb3f | [
"MIT"
] | permissive | zalihat/opennem | 3ea8db7246f350fb0eacf8c6078dbffa4fe9aea2 | 0f82e4fc3fd2bcfbf56a2741d89e4228d017dcf3 | refs/heads/master | 2023-02-27T15:37:47.206336 | 2021-02-08T07:28:57 | 2021-02-08T07:28:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | # pylint: disable=no-member
"""
update indicies that aren't used
Revision ID: 4bf86ff5c8ff
Revises: 64987ea01b57
Create Date: 2020-11-23 02:54:29.564574
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "4bf86ff5c8ff"
down_revision = "64987ea01b57"
branch_labels = None
depends_on = None
def upgrade():
op.drop_index("idx_facility_scada_trading_interval_perth_year")
op.drop_index("idx_facility_scada_trading_interval_perth_month")
op.drop_index("idx_facility_scada_trading_interval_perth_day")
op.drop_index("idx_facility_scada_trading_interval_perth_hour")
op.drop_index("idx_balancing_summary_trading_interval_perth_year")
op.drop_index("idx_balancing_summary_trading_interval_perth_month")
op.drop_index("idx_balancing_summary_trading_interval_perth_day")
op.drop_index("idx_balancing_summary_trading_interval_perth_hour")
op.drop_index("idx_facility_scada_trading_interval_sydney_year")
op.drop_index("idx_facility_scada_trading_interval_sydney_month")
op.drop_index("idx_facility_scada_trading_interval_sydney_day")
op.drop_index("idx_facility_scada_trading_interval_sydney_hour")
op.drop_index("idx_balancing_summary_trading_interval_sydney_year")
op.drop_index("idx_balancing_summary_trading_interval_sydney_month")
op.drop_index("idx_balancing_summary_trading_interval_sydney_day")
op.drop_index("idx_balancing_summary_trading_interval_sydney_hour")
def downgrade():
pass
| [
"git@nikcub.me"
] | git@nikcub.me |
c5c65961c025e5d15baca257d8eb76941fdfed04 | ae3a7a4cc51364a1a08fc532e9bb70dea797189a | /auto_annotator.py | bf6479067f03e5923dc2dc9bee504a4e8b36776a | [] | no_license | Justme21/M.Sc-2017 | 038b0871ed0f6add536d837702ea17f5db4de88a | e70f94e163942041ab905d2df3cf495a7d48ce28 | refs/heads/master | 2018-01-16T12:39:53.134126 | 2017-08-11T19:55:29 | 2017-08-11T19:55:29 | 95,567,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,051 | py | #!/usr/bin/python
import math
import matplotlib.pyplot as plt
import random
class DataStore():
def __init__(self,file_name,content_list):
self.name = file_name #This is the full address of datasource
self.file_content = [] #Will hold the desired contents from the file
op_file = open("{}".format(file_name))
for entry in op_file: #Iterate through the rows in the file
#We only include the entries corresponding to the 0-indexed columns specified in content_list
entry = [float(x) for i,x in enumerate(entry.split()) if i in content_list]
if -9.0 not in entry:
self.file_content.append(entry)
self.index = 0 #Initialise the index to 0
self.time = self.file_content[0][0]
def getAll(self,i):
tot_list = []
for row in self.file_content:
if -9 not in row: #This is a sloppy way to omit the problem entries in PROC_LANE_DETECTION
tot_list.append(row[i])
return tot_list
def advance(self):
self.index+=1
if self.index>=len(self.file_content): self.index = None
else:
self.time = self.file_content[self.index][0]
def sampleFromDist(x_t_1,accel,chng,mean,std_dev):
rnd_val = random.random()
k = math.log(3)/(.165*.15) #.165 is approximately the observed magnitude averages for acceleration to both the left and right. REVISE THIS
if x_t_1 == "L":
# chng negative => e^-chng has large value for change of small magnitude
# and small value for large magnitude. Thus for small changes its is unlikely we
# are still going left, and for large changes it is very likely
if rnd_val<1-2/(math.e**(k*(-chng))+1): return "L"
else: return "F"
elif x_t_1 == "R":
if rnd_val<1-2/(math.e**(k*chng)+1): return "R"
else: return "F"
else:
#t_val = (1/math.sqrt(2*math.pi*(std_dev**2)))*math.e**(-((accel-mean)**2)/(2*(std_dev**2)))
#We omit the normalisation so that the distribution we are sampling from has the same range as the random number
t_val = math.e**(-((accel-mean)**2)/(2*(std_dev**2)))
if accel<0 and rnd_val>t_val: return "L"
elif accel>0 and rnd_val>t_val: return "R"
else: return "F"
def probFromDist(z_t,x_t,mean,std_dev,road_width,car_width):
z_l = (road_width/2)+z_t-(car_width/2)
z_r = (road_width/2)-z_t-(car_width/2)
k = math.log(3) #This is based on the assumption that when z_l/z_r = 1 the probability of being in the state should be 1
if x_t not in {"R","L"}:
return (1/math.sqrt(2*math.pi*(std_dev**2)))*math.e**(-((z_t-mean)**2)/(2*(std_dev**2)))
else:
if x_t == "R":
z_rat = z_l/z_r
else:
z_rat = z_r/z_l
if (z_r <.01 and x_t=="R") or (z_l<.01 and x_t=="L"):
return 1.0
return 1-2/(math.e**(k*z_rat)+1) #Functional form of hyperbolic tan (tanh) modified to include k as the slope parameter
def maxState(w,particle_dict):
count_dict = {"L":0,"R":0,"F":0}
cur_max = -1
max_state = None
prev_w = 0
for entry in particle_dict:
count_dict[particle_dict[entry]] += w[entry]-prev_w
prev_w = w[entry]
for entry in count_dict:
if count_dict[entry]>cur_max:
cur_max = count_dict[entry]
max_state = entry
#return count_dict
return (max_state,cur_max)
def advanceAll(source_list):
index_list = []
for source in source_list:
source.advance()
cur_time = max(source.time for source in source_list)
for source in source_list:
while source.time<cur_time and source.index!=None:
source.advance()
index_list.append(source.index)
return index_list
#Locations and storage variables
folder_loc = "Datasets/UAH-DRIVESET-v1/D2/20151120133502-26km-D2-AGGRESSIVE-MOTORWAY/"
#folder_loc = "Datasets/UAH_DRIVESET-v1/D3/20151126110502-26km-D3-NORMAL-MOTORWAY"
files_of_interest = ["RAW_ACCELEROMETERS","PROC_LANE_DETECTION","PROC_VEHICLE_DETECTION"]
entries_of_interest = [[0,1,2,3,4],[0,1,3,4],[0,1,2,3,4]]
#Creating a list with access to all the data_folders we are interested in
datastore_dict = {}
datastore_list = []
for i, entry in enumerate(files_of_interest):
datastore_dict[entry] = DataStore("{0}{1}.txt".format(folder_loc,entry),entries_of_interest[i])
datastore_list.append(datastore_dict[entry])
###NOTE: Here we are assuming that the values observed are simultaneous
### i.e. we assume that the i^th entry in Z is the position observed in the same
### timestep that acceleration a_i was observed
prp_accel = datastore_dict["RAW_ACCELEROMETERS"].getAll(3)
Z = datastore_dict["PROC_LANE_DETECTION"].getAll(1)
road_width = datastore_dict["PROC_LANE_DETECTION"].getAll(2)
data_qual = datastore_dict["PROC_LANE_DETECTION"].getAll(3)
car_width = 1.75 #given in metres
prp_accel = [9.81 * x for x in prp_accel] #Translating this to m/s for common units
accel_chng = [0]+[prp_accel[i]-prp_accel[i-1] for i in range(1,len(prp_accel))]
###NOTE: While this all works fine in theory, in practise the datasets used tend to have missing/
# incorrect values. In the dataset this is represented by a 0 in the last column of the Lane Data
# The issue is that during this time the accelerometer readings are unaffected, but the Z values
# revert to 0, and the road_width is also incorrect. I have not yet decided how to resolve this
mean_accel = sum(prp_accel)/len(prp_accel)
std_dev_accel = math.sqrt((1.0/(len(prp_accel)-1))*sum([(x-mean_accel)**2 for x in prp_accel]))
mean_Z = sum(Z)/len(Z)
std_dev_Z = math.sqrt((1.0/(len(Z)-1))*sum([(x-mean_Z)**2 for x in Z]))
n = 50 #This is probably too many particles, but it works
w_old = [(i+1)*1.0/n for i in range(n)]
w_new = [_ for _ in range(n)]
particle_dict_old = {}
particle_dict_new = {}
rand_val = None
state = None
state_path = []
for i in range(n):
particle_dict_old[i] = "F"
while None not in advanceAll(datastore_list):
w_norm = 0
t = [source.index for source in datastore_list]
for i in range(n):
rand_val = random.random()
j = 0
while w_old[j]<rand_val: j += 1
x_t_1 = particle_dict_old[j]
x_t = sampleFromDist(x_t_1,prp_accel[t[0]],accel_chng[t[0]],mean_accel,std_dev_accel)
w_new[i] = probFromDist(Z[t[1]],x_t,mean_Z,std_dev_Z,road_width[t[1]],car_width)
w_norm += w_new[i]
particle_dict_new[i] = x_t
for i in range(len(w_new)):
if i== 0:
w_new[i]/= w_norm
else:
w_new[i] = w_new[i-1] + w_new[i]/w_norm
particle_dict_old = dict(particle_dict_new)
w_old = list(w_new)
particle_dict_new = {}
w_new = [_ for _ in range(n)]
time = max([source.time for source in datastore_list])
if data_qual[t[1]] in {0,-9}: print("*"),
print("{}:{:02d} ({}) \t {}".format(int(time/60),int(time%60),time,maxState(w_old,particle_dict_old)))
| [
"s1674364@sms.ed.ac.uk"
] | s1674364@sms.ed.ac.uk |
a32a4c5cc53fb4c6aaab36138397a56af955a001 | 77255f8a1fefcdc1367ffa15e75081afeb5c69e3 | /Heimaverkefni/population.py | af125ac4f41af53c458df55c504ef916de191652 | [] | no_license | Mikksterinn/Python_HR_Mikki | 5a8faf47d861f72e615d3cc97aaff70bdf3bcafc | 58e80fd4442d4361033f045ec9357a7afadac1c4 | refs/heads/master | 2020-07-20T05:49:42.238648 | 2019-09-27T19:11:15 | 2019-09-27T19:11:15 | 206,584,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | ar = float(input("Years: ")) # The input is the number of years in a float format.
folksfjoldi = 307357870 # Number of people at the beginning.
sek = 31536000 # Number of seconds in a year.
sek_tot = ar*sek # Total number of seconds being tested.
ar_heil = int(ar) # Changing from float to integers for the years.
if ar >= 0: # If the input of years is positive or equal to zero we do the calculation.
sum = sek_tot/7 + sek_tot/35 - sek_tot/13 + folksfjoldi # The number of people after x years.
summan = int(sum) # Changing from float to int.
print("New population after ", ar_heil , " years is " , summan ) # printing the outcome.
else: # If the input is negative we print an error message.
print("Invalid input!")
| [
"mikael.ingi@gmail.com"
] | mikael.ingi@gmail.com |
a99c4d3cb68c551d8ecf9d307608d40a13d95cd8 | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /0701-0800/0716-Max Stack/0716-Max Stack.py | 698d5125b3a7f7ace31abc4055e1c827e468fd5e | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 967 | py | class MaxStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.St = []
self.maxSt = []
def push(self, x: int) -> None:
self.St.append(x)
maxX = x if not self.maxSt or x > self.maxSt[-1] else self.maxSt[-1]
self.maxSt.append(maxX)
def pop(self) -> int:
self.maxSt.pop()
return self.St.pop()
def top(self) -> int:
return self.St[-1]
def peekMax(self) -> int:
return self.maxSt[-1]
def popMax(self) -> int:
maxX = self.maxSt[-1]
buffer = []
while self.St[-1] != maxX:
buffer.append(self.pop())
self.pop()
while buffer:
self.push(buffer.pop())
return maxX
# Your MaxStack object will be instantiated and called as such:
# obj = MaxStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.peekMax()
# param_5 = obj.popMax()
| [
"jiadaizhao@gmail.com"
] | jiadaizhao@gmail.com |
03cdd78d3c498e9f197dee72e98fbb2d723db8b9 | c51437ecd70e2baccd064f30a4240b71058aeee3 | /mysite/urls.py | 79ceb09e2698b729b8436631e4708a624f7c2e5a | [] | no_license | mri-dula/polls | 50277715490cd55b9b9a4bd1459b72ac1e207627 | a0c7984fa59364b58080812739d8e48ab5f9cfb5 | refs/heads/master | 2022-02-24T02:13:57.744802 | 2019-07-23T08:49:12 | 2019-07-23T08:49:12 | 198,193,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
# Route table: Django admin UI plus everything under /polls/ delegated to
# the polls app's own URLconf.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^polls/', include('polls.urls'))
]
| [
"mridulaprabhu@Mridulas-MacBook-Pro.local"
] | mridulaprabhu@Mridulas-MacBook-Pro.local |
8d63dcaac858c907bdd17919a0f0c14d3c28595f | 129ef25f53ff1a921163e0202b7ca0421db3cb59 | /MDS2S/PyEMD/tests/test_visualization.py | 8a1dddf8065bf02aa3d8a47cd2ec3340a2896aec | [] | no_license | Askfk/MDS2SGAN | f057f21e9a43aab4f429791a7f3f38b6440ad3b0 | a32024527b0d531c418008feb6d11782c92d68e9 | refs/heads/master | 2023-03-27T18:37:14.324335 | 2021-03-24T12:51:20 | 2021-03-24T12:51:20 | 315,207,775 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,746 | py | #!/usr/bin/python
# Coding: UTF-8
from __future__ import print_function
import numpy as np
from PyEMD import EMD, EEMD, CEEMDAN
from PyEMD.visualisation import Visualisation
import unittest
class VisTest(unittest.TestCase):
    """Unit tests for PyEMD.visualisation.Visualisation."""

    def test_instantiation(self):
        # An EMD instance that has not been executed yet has no IMFs to plot.
        emd = EMD()
        with self.assertRaises(ValueError):
            Visualisation(emd)

    def test_instantiation2(self):
        # After a successful decomposition the visualiser mirrors EMD's output.
        t = np.linspace(0, 1, 50)
        S = t + np.cos(np.cos(4.*t**2))
        emd = EMD()
        emd.emd(S, t)
        imfs, res = emd.get_imfs_and_residue()
        vis = Visualisation(emd)
        assert (vis.imfs == imfs).all()
        assert (vis.residue == res).all()

    def test_check_imfs(self):
        # Explicitly supplied IMFs/residue pass through unchanged.
        vis = Visualisation()
        imfs = np.arange(50).reshape(2, 25)
        res = np.arange(25)
        imfs, res = vis._check_imfs(imfs, res, False)
        assert len(imfs) == 2

    def test_check_imfs2(self):
        # No IMFs at all is an error.
        vis = Visualisation()
        with self.assertRaises(AttributeError):
            vis._check_imfs(None, None, False)

    def test_check_imfs3(self):
        # Missing residue is fine when include_residue=False.
        vis = Visualisation()
        imfs = np.arange(50).reshape(2, 25)
        vis._check_imfs(imfs, None, False)

    def test_check_imfs4(self):
        # Missing residue is an error when include_residue=True.
        vis = Visualisation()
        imfs = np.arange(50).reshape(2, 25)
        with self.assertRaises(AttributeError):
            vis._check_imfs(imfs, None, True)

    def test_check_imfs5(self):
        # Values obtained from a run EMD survive the check untouched.
        t = np.linspace(0, 1, 50)
        S = t + np.cos(np.cos(4.*t**2))
        emd = EMD()
        emd.emd(S, t)
        imfs, res = emd.get_imfs_and_residue()
        vis = Visualisation(emd)
        imfs2, res2 = vis._check_imfs(imfs, res, False)
        assert (imfs == imfs2).all()
        assert (res == res2).all()

    def test_plot_imfs(self):
        # Plotting without any IMFs attached is an error.
        vis = Visualisation()
        with self.assertRaises(AttributeError):
            vis.plot_imfs()

    # Does not work for Python 2.7 (TravisCI), even with Agg backend
    # def test_plot_imfs2(self):
    #     t = np.linspace(0, 1, 50)
    #     S = t + np.cos(np.cos(4.*t**2))
    #     emd = EMD()
    #     emd.emd(S, t)
    #     vis = Visualisation(emd)
    #     vis.plot_imfs()

    def test_calc_instant_phase(self):
        # Phase has the same length as the input signal (no smoothing).
        sig = np.arange(10)
        vis = Visualisation()
        phase = vis._calc_inst_phase(sig, None)
        assert len(sig) == len(phase)

    def test_calc_instant_phase2(self):
        # Same property with a valid smoothing factor alpha.
        t = np.linspace(0, 1, 50)
        S = t + np.cos(np.cos(4.*t**2))
        emd = EMD()
        imfs = emd.emd(S, t)
        vis = Visualisation()
        phase = vis._calc_inst_phase(imfs, 0.4)
        assert len(imfs) == len(phase)

    def test_calc_instant_phase3(self):
        # alpha outside the accepted range must be rejected.
        t = np.linspace(0, 1, 50)
        S = t + np.cos(np.cos(4.*t**2))
        emd = EMD()
        imfs = emd.emd(S, t)
        vis = Visualisation()
        with self.assertRaises(AssertionError):
            vis._calc_inst_phase(imfs, 0.8)

    def test_calc_instant_freq(self):
        # Frequencies keep the IMF array's shape (no smoothing).
        t = np.linspace(0, 1, 50)
        S = t + np.cos(np.cos(4.*t**2))
        emd = EMD()
        imfs = emd.emd(S, t)
        vis = Visualisation()
        freqs = vis._calc_inst_freq(imfs, t, False, None)
        assert imfs.shape == freqs.shape

    def test_calc_instant_freq2(self):
        # BUG FIX: this method was also named test_calc_instant_freq, which
        # silently shadowed the test above so it never ran; renamed so both
        # execute (matches the file's testN naming convention).
        t = np.linspace(0, 1, 50)
        S = t + np.cos(np.cos(4.*t**2))
        emd = EMD()
        imfs = emd.emd(S, t)
        vis = Visualisation()
        freqs = vis._calc_inst_freq(imfs, t, False, 0.4)
        assert imfs.shape == freqs.shape

    def test_plot_instant_freq(self):
        # Plotting instantaneous frequency without IMFs attached is an error.
        vis = Visualisation()
        t = np.arange(20)
        with self.assertRaises(AttributeError):
            vis.plot_instant_freq(t)
# Allow running this test module directly: python test_visualization.py
if __name__ == "__main__":
    unittest.main()
| [
"yxl1215@student.bham.ac.uk"
] | yxl1215@student.bham.ac.uk |
ef0c39c82fc23afa076ac6c52052cbcddae6e2de | e83f197765ce10ff4173741d68cedd9c31ef71d3 | /aula-11/json-2.py | 7cc24cfac4404c389e1ade1112edb9cf90bdab27 | [] | no_license | fkbral/python-pro | b1e9ec4adfed80496fab04db6a29251d48a8f86c | ba0020520763cd20f930d65ab310dc2af3625a4f | refs/heads/master | 2023-05-06T04:52:45.071186 | 2021-05-27T01:54:29 | 2021-05-27T01:54:29 | 357,371,003 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | import json
# Build the player record as a single dict literal and serialize it to a
# JSON string.  (Removed a dead `jogador = {}` that was immediately
# overwritten, and the commented-out per-key assignments it replaced.)
jogador = {
    'nome': 'Mario',
    'pontuacao': 0,
}
stringjson = json.dumps(jogador)
print(stringjson) | [
"38699509+fkbral@users.noreply.github.com"
] | 38699509+fkbral@users.noreply.github.com |
ffaa35bbff6e5594111a59aeed63bc26897a2692 | 0b12e31cafa598c163d2cc53706df193a73e31e3 | /people/models.py | 65c21cc6090d290ebf5ac91ed163dedd5de88207 | [] | no_license | getopen/pro | 6a4dba774558e1de0419a4c6daf030ee360d68fd | 97e939d26d9fdaf54f05f3cd4a9b32a6722d0ac3 | refs/heads/master | 2021-07-06T09:35:18.077577 | 2017-09-30T16:07:06 | 2017-09-30T16:07:06 | 100,471,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,517 | py | from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.utils import timezone
import hashlib
import random
import string
from django.conf import settings
SALT = getattr(settings, "EMAIL_TOKEN_SALT")
class MyUserManager(BaseUserManager):
    """Manager that creates Member accounts with normalized email addresses."""

    def create_user(self, username, email, password=None):
        """Create, save and return a regular user.

        Raises ValueError when either the email or the username is missing.
        """
        if not email:
            raise ValueError('Users must have an email address')
        if not username:
            raise ValueError('Users must have an username')
        # Stamp both the join time and the first "login" with the current time.
        now = timezone.now()
        new_user = self.model(
            username=username,
            email=self.normalize_email(email),
            date_joined=now,
            last_login=now,
        )
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, username, email, password):
        """Create a regular user, then promote it to admin and re-save."""
        admin_user = self.create_user(username, email, password=password)
        admin_user.is_admin = True
        admin_user.save(using=self._db)
        return admin_user
# Custom user model ("new-style" user table) replacing Django's default User.
class Member(AbstractBaseUser):
    # AbstractBaseUser itself only supplies three fields:
    # password, last_login and is_active.
    email = models.EmailField(verbose_name='邮箱',max_length=255,unique=True,)
    username = models.CharField(verbose_name="用户名", max_length=16, unique=True)
    weibo_id = models.CharField(verbose_name="新浪微博", max_length=30, blank=True)
    blog = models.CharField(verbose_name="个人网站", max_length=200, blank=True)
    location = models.CharField(verbose_name="城市", max_length=10, blank=True)
    profile = models.CharField(verbose_name="个人简介", max_length=140, blank=True)
    avatar = models.CharField(verbose_name="头像", max_length=128, blank=True)
    au = models.IntegerField(verbose_name="用户活跃度", default=0)
    last_ip = models.GenericIPAddressField(verbose_name="上次访问IP", default="0.0.0.0")
    email_verified = models.BooleanField(verbose_name="邮箱是否验证", default=False)
    date_joined = models.DateTimeField(verbose_name="用户注册时间", default=timezone.now)
    topic_num = models.IntegerField(verbose_name="帖子数", default=0)
    comment_num = models.IntegerField(verbose_name="评论数", default=0)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    objects = MyUserManager()
    # `objects` is the default manager (the one used throughout the project);
    # all create/read/update/delete access goes through it.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __str__(self):
        # String form shown wherever a Member instance is rendered (templates).
        return self.username
    def is_email_verified(self):
        # Callable from a template on a Member instance to check whether the
        # email address has been confirmed.
        return self.email_verified
    def get_weibo(self):
        return self.weibo_id
    def get_username(self):
        # NOTE: omit the parentheses when calling methods from template tags!!
        return self.username
    def get_email(self):
        # NOTE: omit the parentheses when calling methods from template tags!!
        return self.email
    def get_full_name(self):
        # The user is identified by their email address
        # get_full_name normally combines first_name/last_name, but this model
        # redefines the schema, so the method must be overridden here.
        return self.email
    def get_short_name(self):
        # The user is identified by their email address
        # get_short_name normally returns first_name; overridden for the same
        # reason as get_full_name above.
        return self.username
    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        return True
    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        return True
    def calculate_au(self):
        """
        Recompute and store the activity score.
        Formula: Topic * 5 + Comment * 1
        """
        self.au = self.topic_num * 5 + self.comment_num * 1
        return self.au
    @property
    # Declared a property so it reads like an attribute (no call parentheses).
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin
class Follower(models.Model):
    """
    Follow relationship between two members.
    B is the follower of A
    (``user_b`` follows ``user_a``; ``user_a`` is followed by ``user_b``.)
    """
    # user_a: the member being followed ("idol"); user_b: the follower ("fan").
    user_a = models.ForeignKey(Member, related_name="user_a",verbose_name='偶像')
    user_b = models.ForeignKey(Member, related_name="user_b",verbose_name='粉丝')
    date_followed = models.DateTimeField(default=timezone.now,verbose_name='关注时间')
    class Meta:
        # A member can follow another member at most once.
        unique_together = ('user_a', 'user_b')
    def __str__(self):
        return "%s following %s" % (self.user_b, self.user_a)
class EmailVerified(models.Model):
    """One-time token a member presents to confirm their email address."""
    user = models.OneToOneField(Member, related_name="user")
    token = models.CharField("Email 验证 token", max_length=32, default=None)
    # When the token was issued; its date also feeds into generate_token.
    timestamp = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return "%s@%s" % (self.user, self.token)
    def generate_token(self):
        """Return an MD5 hex digest of a salted random string plus the issue date."""
        year = self.timestamp.year
        month = self.timestamp.month
        day = self.timestamp.day
        date = "%s-%s-%s" % (year, month, day)
        token = hashlib.md5((self.ran_str()+date).encode('utf-8')).hexdigest()
        return token
    def ran_str(self):
        """Return 8 random alphanumeric characters followed by the site-wide SALT."""
        salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))
        return salt + SALT
class FindPass(models.Model):
    """One-time token used for the password-recovery flow."""
    # NOTE(review): generate_token/ran_str duplicate EmailVerified's methods —
    # candidate for a shared mixin.
    user = models.OneToOneField(Member, verbose_name="用户")
    token = models.CharField(max_length=32, blank=True)
    # When the token was issued; its date also feeds into generate_token.
    timestamp = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return "%s@%s" % (self.user, self.token)
    def generate_token(self):
        """Return an MD5 hex digest of a salted random string plus the issue date."""
        year = self.timestamp.year
        month = self.timestamp.month
        day = self.timestamp.day
        date = "%s-%s-%s" % (year, month, day)
        token = hashlib.md5((self.ran_str()+date).encode('utf-8')).hexdigest()
        return token
    def ran_str(self):
        """Return 8 random alphanumeric characters followed by the site-wide SALT."""
        salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))
        return salt + SALT
| [
"zhuoqun527@qq.com"
] | zhuoqun527@qq.com |
bc921e782b07b4e292d07092874c199f0e31459a | d500e22bd56b989e0843dc3d9370a986d825129a | /find_the value.py | 6d0f5f9f7e9a243d44b7ce26b35c4e6e5a43298d | [] | no_license | kaminee1jan/dictionary.py | e2cde2a17e0b294c9f3bb8049e4aa75fb9fe2532 | f5f74109cdaa1ea6fd41c25274d9390222c90a75 | refs/heads/main | 2023-06-27T10:34:59.497252 | 2021-07-30T05:06:56 | 2021-07-30T05:06:56 | 382,230,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | details ={
"name": "Bijender",
"age": 17,
"class": "10th"
}
for x in details.values():
print(x) | [
"noreply@github.com"
] | kaminee1jan.noreply@github.com |
0797925dc14673c3acf56f3d25a3a0549744439a | 760b5b0d38445f414211b6ee145de4a07b8e903b | /kube_hunter/conf/logging.py | bf5ed5a14a7586dec1b0296489606c847de0b976 | [
"Apache-2.0"
] | permissive | openslate/kube-hunter | 8af4c42668fa0c96f8093ba014e54b6d5ec9594e | e010b9db3c41cb02736be24d440264cca39aa6e2 | refs/heads/main | 2023-03-30T12:38:46.347162 | 2021-03-30T19:19:09 | 2021-03-30T19:19:09 | 350,756,073 | 0 | 1 | Apache-2.0 | 2021-03-30T15:17:08 | 2021-03-23T15:07:04 | null | UTF-8 | Python | false | false | 1,158 | py | import logging
DEFAULT_LEVEL = logging.INFO
DEFAULT_LEVEL_NAME = logging.getLevelName(DEFAULT_LEVEL)
LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s %(message)s"

# Suppress logging from scapy
logging.getLogger("scapy.runtime").setLevel(logging.CRITICAL)
logging.getLogger("scapy.loading").setLevel(logging.CRITICAL)


def setup_logger(level_name, logfile):
    """Configure the root logger for kube-hunter.

    level_name: case-insensitive level name ("DEBUG", "INFO", ...).
        "NONE" disables logging entirely; unrecognized names fall back to
        DEFAULT_LEVEL with a warning.
    logfile: path to a log file, or None to log to stderr.
    """
    # Remove any existing handlers
    # Unnecessary in Python 3.8 since `logging.basicConfig` has `force` parameter
    root = logging.getLogger()
    for h in root.handlers[:]:
        h.close()
        root.removeHandler(h)

    if level_name.upper() == "NONE":
        logging.disable(logging.CRITICAL)
        return

    # Resolve the level name to its numeric value; non-int attributes
    # (e.g. "BASIC_FORMAT") count as unknown.
    log_level = getattr(logging, level_name.upper(), None)
    if not isinstance(log_level, int):
        log_level = None
    # BUG FIX: the previous `log_level or DEFAULT_LEVEL` treated the valid
    # level NOTSET (numeric value 0, falsy) as unknown and warned about it;
    # compare against None instead.
    kwargs = {
        "level": DEFAULT_LEVEL if log_level is None else log_level,
        "format": LOG_FORMAT,
    }
    if logfile is not None:
        kwargs["filename"] = logfile
    logging.basicConfig(**kwargs)
    if log_level is None:
        logging.warning(f"Unknown log level '{level_name}', using {DEFAULT_LEVEL_NAME}")
| [
"noreply@github.com"
] | openslate.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.