diff --git "a/6416.jsonl" "b/6416.jsonl"
new file mode 100644
--- /dev/null
+++ "b/6416.jsonl"
@@ -0,0 +1,774 @@
+{"seq_id":"375642509","text":"import pyaudio\nimport queue\n\nclass Audio:\n '''\n 录音类,使用pyaudio\n '''\n def __init__(self, rate=16000, frames_size=None, channels=None, device_index=None):\n '''\n 录音类初始化\n :param rate:采样率\n :param frames_size:数据帧大小\n :param channels:通道数\n :param device_index:录音设备id\n '''\n self.sample_rate = rate\n self.frames_size = frames_size if frames_size else rate / 100\n self.channels = channels if channels else 1\n\n self.pyaudio_instance = pyaudio.PyAudio()\n\n self.stream = self.pyaudio_instance.open(\n start=False,\n format=pyaudio.paInt16,\n input_device_index=device_index,\n channels=self.channels,\n rate=int(self.sample_rate),\n frames_per_buffer=int(self.frames_size),\n stream_callback=self.__callback,\n input=True\n )\n\n self.sinks = []\n\n def start(self):\n '''\n 开始录音\n :return:\n '''\n self.stream.start_stream()\n\n def stop(self):\n '''\n 结束录音\n :return:\n '''\n self.stream.stop_stream()\n\n def __callback(self, in_data, frame_count, time_info, status):\n '''\n 录音数据(pmc)回调\n :param in_data:录音数据\n :param frame_count:\n :param time_info:\n :param status:\n :return:\n '''\n for sink in self.sinks:\n sink.put(in_data)\n\n return None, pyaudio.paContinue\n\n\n\n\n\nclass SpeechRecognizer(object):\n\n def __init__(self):\n '''\n 类初始化\n :param dueros:DuerOS核心实现模块实例\n '''\n\n self.listening = False\n self.audio_queue = queue.Queue()\n\n def put(self, audio):\n \"\"\"\n 语音pcm输入\n :param audio: S16_LE format, sample rate 16000 bps audio data\n :return: None\n \"\"\"\n if self.listening:\n self.audio_queue.put(audio)\n\n\n def record(self, timeout=10000):\n time_elapsed = 0\n while self.listening or time_elapsed >= timeout:\n try:\n chunk = self.audio_queue.get(timeout=1.0)\n except queue.Empty:\n break\n\n yield chunk\n time_elapsed += 10 # 10 ms chunk\n\n self.listening = False","sub_path":"mic_1.py","file_name":"mic_1.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"74237467","text":"from __future__ import print_function, unicode_literals\nfrom zope.interface import alsoProvides\nfrom twisted.trial import unittest\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet.task import Clock, Cooperator\nfrom twisted.internet.interfaces import IAddress\nimport mock\nfrom ...eventual import EventualQueue\nfrom ..._interfaces import ISend, IDilationManager, ITerminator\nfrom ...util import dict_to_bytes\nfrom ..._dilation import roles\nfrom ..._dilation.encode import to_be4\nfrom ..._dilation.manager import (Dilator, Manager, make_side,\n OldPeerCannotDilateError,\n UnknownDilationMessageType,\n UnexpectedKCM,\n UnknownMessageType)\nfrom ..._dilation.connection import Open, Data, Close, Ack, KCM, Ping, Pong\nfrom .common import clear_mock_calls\n\n\ndef make_dilator():\n reactor = object()\n clock = Clock()\n eq = EventualQueue(clock)\n term = mock.Mock(side_effect=lambda: True) # one write per Eventual tick\n\n def term_factory():\n return term\n coop = Cooperator(terminationPredicateFactory=term_factory,\n scheduler=eq.eventually)\n send = mock.Mock()\n alsoProvides(send, ISend)\n dil = Dilator(reactor, eq, coop)\n terminator = mock.Mock()\n alsoProvides(terminator, ITerminator)\n dil.wire(send, terminator)\n return dil, send, reactor, eq, clock, coop\n\n\nclass TestDilator(unittest.TestCase):\n def test_manager_and_endpoints(self):\n dil, send, reactor, eq, clock, coop = make_dilator()\n d1 = dil.dilate()\n d2 = dil.dilate()\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n\n key = b\"key\"\n transit_key = object()\n with mock.patch(\"wormhole._dilation.manager.derive_key\",\n return_value=transit_key) as dk:\n dil.got_key(key)\n self.assertEqual(dk.mock_calls, [mock.call(key, b\"dilation-v1\", 32)])\n self.assertIdentical(dil._transit_key, transit_key)\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n\n host_addr = dil._host_addr\n\n peer_addr = object()\n m_sca = mock.patch(\"wormhole._dilation.manager._SubchannelAddress\",\n return_value=peer_addr)\n sc = mock.Mock()\n m_sc = mock.patch(\"wormhole._dilation.manager.SubChannel\",\n return_value=sc)\n scid0 = b\"\\x00\\x00\\x00\\x00\"\n\n m = mock.Mock()\n alsoProvides(m, IDilationManager)\n m.when_first_connected.return_value = wfc_d = Deferred()\n with mock.patch(\"wormhole._dilation.manager.Manager\",\n return_value=m) as ml:\n with mock.patch(\"wormhole._dilation.manager.make_side\",\n return_value=\"us\"):\n with m_sca, m_sc as m_sc_m:\n dil.got_wormhole_versions({\"can-dilate\": [\"1\"]})\n # that should create the Manager\n self.assertEqual(ml.mock_calls, [mock.call(send, \"us\", transit_key,\n None, reactor, eq, coop, host_addr, False)])\n # and create subchannel0\n self.assertEqual(m_sc_m.mock_calls,\n [mock.call(scid0, m, host_addr, peer_addr)])\n # and tell it to start, and get wait-for-it-to-connect Deferred\n self.assertEqual(m.mock_calls, [mock.call.set_subchannel_zero(scid0, sc),\n mock.call.start(),\n mock.call.when_first_connected(),\n ])\n clear_mock_calls(m)\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n\n ce = mock.Mock()\n m_ce = mock.patch(\"wormhole._dilation.manager.ControlEndpoint\",\n return_value=ce)\n lep = object()\n m_sle = mock.patch(\"wormhole._dilation.manager.SubchannelListenerEndpoint\",\n return_value=lep)\n\n with m_ce as m_ce_m, m_sle as m_sle_m:\n wfc_d.callback(None)\n eq.flush_sync()\n self.assertEqual(m_ce_m.mock_calls, [mock.call(peer_addr)])\n self.assertEqual(ce.mock_calls, [mock.call._subchannel_zero_opened(sc)])\n 
self.assertEqual(m_sle_m.mock_calls, [mock.call(m, host_addr)])\n self.assertEqual(m.mock_calls,\n [mock.call.set_listener_endpoint(lep),\n ])\n clear_mock_calls(m)\n\n eps = self.successResultOf(d1)\n self.assertEqual(eps, self.successResultOf(d2))\n d3 = dil.dilate()\n eq.flush_sync()\n self.assertEqual(eps, self.successResultOf(d3))\n\n # all subsequent DILATE-n messages should get passed to the manager\n self.assertEqual(m.mock_calls, [])\n pleasemsg = dict(type=\"please\", side=\"them\")\n dil.received_dilate(dict_to_bytes(pleasemsg))\n self.assertEqual(m.mock_calls, [mock.call.rx_PLEASE(pleasemsg)])\n clear_mock_calls(m)\n\n hintmsg = dict(type=\"connection-hints\")\n dil.received_dilate(dict_to_bytes(hintmsg))\n self.assertEqual(m.mock_calls, [mock.call.rx_HINTS(hintmsg)])\n clear_mock_calls(m)\n\n # we're nominally the LEADER, and the leader would not normally be\n # receiving a RECONNECT, but since we've mocked out the Manager it\n # won't notice\n dil.received_dilate(dict_to_bytes(dict(type=\"reconnect\")))\n self.assertEqual(m.mock_calls, [mock.call.rx_RECONNECT()])\n clear_mock_calls(m)\n\n dil.received_dilate(dict_to_bytes(dict(type=\"reconnecting\")))\n self.assertEqual(m.mock_calls, [mock.call.rx_RECONNECTING()])\n clear_mock_calls(m)\n\n dil.received_dilate(dict_to_bytes(dict(type=\"unknown\")))\n self.assertEqual(m.mock_calls, [])\n self.flushLoggedErrors(UnknownDilationMessageType)\n\n def test_peer_cannot_dilate(self):\n dil, send, reactor, eq, clock, coop = make_dilator()\n d1 = dil.dilate()\n self.assertNoResult(d1)\n\n dil._transit_key = b\"\\x01\" * 32\n dil.got_wormhole_versions({}) # missing \"can-dilate\"\n eq.flush_sync()\n f = self.failureResultOf(d1)\n f.check(OldPeerCannotDilateError)\n\n def test_disjoint_versions(self):\n dil, send, reactor, eq, clock, coop = make_dilator()\n d1 = dil.dilate()\n self.assertNoResult(d1)\n\n dil._transit_key = b\"key\"\n dil.got_wormhole_versions({\"can-dilate\": [-1]})\n eq.flush_sync()\n f = self.failureResultOf(d1)\n f.check(OldPeerCannotDilateError)\n\n def test_early_dilate_messages(self):\n dil, send, reactor, eq, clock, coop = make_dilator()\n dil._transit_key = b\"key\"\n d1 = dil.dilate()\n host_addr = dil._host_addr\n self.assertNoResult(d1)\n pleasemsg = dict(type=\"please\", side=\"them\")\n dil.received_dilate(dict_to_bytes(pleasemsg))\n hintmsg = dict(type=\"connection-hints\")\n dil.received_dilate(dict_to_bytes(hintmsg))\n\n m = mock.Mock()\n alsoProvides(m, IDilationManager)\n m.when_first_connected.return_value = Deferred()\n\n scid0 = b\"\\x00\\x00\\x00\\x00\"\n sc = mock.Mock()\n m_sc = mock.patch(\"wormhole._dilation.manager.SubChannel\",\n return_value=sc)\n\n with mock.patch(\"wormhole._dilation.manager.Manager\",\n return_value=m) as ml:\n with mock.patch(\"wormhole._dilation.manager.make_side\",\n return_value=\"us\"):\n with m_sc:\n dil.got_wormhole_versions({\"can-dilate\": [\"1\"]})\n self.assertEqual(ml.mock_calls, [mock.call(send, \"us\", b\"key\",\n None, reactor, eq, coop, host_addr, False)])\n self.assertEqual(m.mock_calls, [mock.call.set_subchannel_zero(scid0, sc),\n mock.call.start(),\n mock.call.rx_PLEASE(pleasemsg),\n mock.call.rx_HINTS(hintmsg),\n mock.call.when_first_connected()])\n\n def test_transit_relay(self):\n dil, send, reactor, eq, clock, coop = make_dilator()\n dil._transit_key = b\"key\"\n host_addr = dil._host_addr\n relay = object()\n d1 = dil.dilate(transit_relay_location=relay)\n self.assertNoResult(d1)\n\n scid0 = b\"\\x00\\x00\\x00\\x00\"\n sc = mock.Mock()\n m_sc = 
mock.patch(\"wormhole._dilation.manager.SubChannel\",\n return_value=sc)\n\n with mock.patch(\"wormhole._dilation.manager.Manager\") as ml:\n with mock.patch(\"wormhole._dilation.manager.make_side\",\n return_value=\"us\"):\n with m_sc:\n dil.got_wormhole_versions({\"can-dilate\": [\"1\"]})\n self.assertEqual(ml.mock_calls, [mock.call(send, \"us\", b\"key\",\n relay, reactor, eq, coop, host_addr, False),\n mock.call().set_subchannel_zero(scid0, sc),\n mock.call().start(),\n mock.call().when_first_connected()])\n\n\nLEADER = \"ff3456abcdef\"\nFOLLOWER = \"123456abcdef\"\n\n\ndef make_manager(leader=True):\n class Holder:\n pass\n h = Holder()\n h.send = mock.Mock()\n alsoProvides(h.send, ISend)\n if leader:\n side = LEADER\n else:\n side = FOLLOWER\n h.key = b\"\\x00\" * 32\n h.relay = None\n h.reactor = object()\n h.clock = Clock()\n h.eq = EventualQueue(h.clock)\n term = mock.Mock(side_effect=lambda: True) # one write per Eventual tick\n\n def term_factory():\n return term\n h.coop = Cooperator(terminationPredicateFactory=term_factory,\n scheduler=h.eq.eventually)\n h.inbound = mock.Mock()\n h.Inbound = mock.Mock(return_value=h.inbound)\n h.outbound = mock.Mock()\n h.Outbound = mock.Mock(return_value=h.outbound)\n h.hostaddr = mock.Mock()\n alsoProvides(h.hostaddr, IAddress)\n with mock.patch(\"wormhole._dilation.manager.Inbound\", h.Inbound):\n with mock.patch(\"wormhole._dilation.manager.Outbound\", h.Outbound):\n m = Manager(h.send, side, h.key, h.relay, h.reactor, h.eq, h.coop, h.hostaddr)\n return m, h\n\n\nclass TestManager(unittest.TestCase):\n def test_make_side(self):\n side = make_side()\n self.assertEqual(type(side), type(u\"\"))\n self.assertEqual(len(side), 2 * 6)\n\n def test_create(self):\n m, h = make_manager()\n\n def test_leader(self):\n m, h = make_manager(leader=True)\n self.assertEqual(h.send.mock_calls, [])\n self.assertEqual(h.Inbound.mock_calls, [mock.call(m, h.hostaddr)])\n self.assertEqual(h.Outbound.mock_calls, [mock.call(m, h.coop)])\n\n m.start()\n self.assertEqual(h.send.mock_calls, [\n mock.call.send(\"dilate-0\",\n dict_to_bytes({\"type\": \"please\", \"side\": LEADER}))\n ])\n clear_mock_calls(h.send)\n\n wfc_d = m.when_first_connected()\n self.assertNoResult(wfc_d)\n\n # ignore early hints\n m.rx_HINTS({})\n self.assertEqual(h.send.mock_calls, [])\n\n c = mock.Mock()\n connector = mock.Mock(return_value=c)\n with mock.patch(\"wormhole._dilation.manager.Connector\", connector):\n # receiving this PLEASE triggers creation of the Connector\n m.rx_PLEASE({\"side\": FOLLOWER})\n self.assertEqual(h.send.mock_calls, [])\n self.assertEqual(connector.mock_calls, [\n mock.call(b\"\\x00\" * 32, None, m, h.reactor, h.eq,\n False, # no_listen\n None, # tor\n None, # timing\n LEADER, roles.LEADER),\n ])\n self.assertEqual(c.mock_calls, [mock.call.start()])\n clear_mock_calls(connector, c)\n\n self.assertNoResult(wfc_d)\n\n # now any inbound hints should get passed to our Connector\n with mock.patch(\"wormhole._dilation.manager.parse_hint\",\n side_effect=[\"p1\", None, \"p3\"]) as ph:\n m.rx_HINTS({\"hints\": [1, 2, 3]})\n self.assertEqual(ph.mock_calls, [mock.call(1), mock.call(2), mock.call(3)])\n self.assertEqual(c.mock_calls, [mock.call.got_hints([\"p1\", \"p3\"])])\n clear_mock_calls(ph, c)\n\n # and we send out any (listening) hints from our Connector\n m.send_hints([1, 2])\n self.assertEqual(h.send.mock_calls, [\n mock.call.send(\"dilate-1\",\n dict_to_bytes({\"type\": \"connection-hints\",\n \"hints\": [1, 2]}))\n ])\n clear_mock_calls(h.send)\n\n # the first 
successful connection fires when_first_connected(), so\n # the Dilator can create and return the endpoints\n c1 = mock.Mock()\n m.connector_connection_made(c1)\n\n self.assertEqual(h.inbound.mock_calls, [mock.call.use_connection(c1)])\n self.assertEqual(h.outbound.mock_calls, [mock.call.use_connection(c1)])\n clear_mock_calls(h.inbound, h.outbound)\n\n h.eq.flush_sync()\n self.successResultOf(wfc_d) # fires with None\n wfc_d2 = m.when_first_connected()\n h.eq.flush_sync()\n self.successResultOf(wfc_d2)\n\n scid0 = b\"\\x00\\x00\\x00\\x00\"\n sc0 = mock.Mock()\n m.set_subchannel_zero(scid0, sc0)\n listen_ep = mock.Mock()\n m.set_listener_endpoint(listen_ep)\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.set_subchannel_zero(scid0, sc0),\n mock.call.set_listener_endpoint(listen_ep),\n ])\n clear_mock_calls(h.inbound)\n\n # the Leader making a new outbound channel should get scid=1\n scid1 = to_be4(1)\n self.assertEqual(m.allocate_subchannel_id(), scid1)\n r1 = Open(10, scid1) # seqnum=10\n h.outbound.build_record = mock.Mock(return_value=r1)\n m.send_open(scid1)\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.build_record(Open, scid1),\n mock.call.queue_and_send_record(r1),\n ])\n clear_mock_calls(h.outbound)\n\n r2 = Data(11, scid1, b\"data\")\n h.outbound.build_record = mock.Mock(return_value=r2)\n m.send_data(scid1, b\"data\")\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.build_record(Data, scid1, b\"data\"),\n mock.call.queue_and_send_record(r2),\n ])\n clear_mock_calls(h.outbound)\n\n r3 = Close(12, scid1)\n h.outbound.build_record = mock.Mock(return_value=r3)\n m.send_close(scid1)\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.build_record(Close, scid1),\n mock.call.queue_and_send_record(r3),\n ])\n clear_mock_calls(h.outbound)\n\n # ack the OPEN\n m.got_record(Ack(10))\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.handle_ack(10)\n ])\n clear_mock_calls(h.outbound)\n\n # test that inbound records get acked and routed to Inbound\n h.inbound.is_record_old = mock.Mock(return_value=False)\n scid2 = to_be4(2)\n o200 = Open(200, scid2)\n m.got_record(o200)\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.send_if_connected(Ack(200))\n ])\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.is_record_old(o200),\n mock.call.update_ack_watermark(200),\n mock.call.handle_open(scid2),\n ])\n clear_mock_calls(h.outbound, h.inbound)\n\n # old (duplicate) records should provoke new Acks, but not get\n # forwarded\n h.inbound.is_record_old = mock.Mock(return_value=True)\n m.got_record(o200)\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.send_if_connected(Ack(200))\n ])\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.is_record_old(o200),\n ])\n clear_mock_calls(h.outbound, h.inbound)\n\n # check Data and Close too\n h.inbound.is_record_old = mock.Mock(return_value=False)\n d201 = Data(201, scid2, b\"data\")\n m.got_record(d201)\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.send_if_connected(Ack(201))\n ])\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.is_record_old(d201),\n mock.call.update_ack_watermark(201),\n mock.call.handle_data(scid2, b\"data\"),\n ])\n clear_mock_calls(h.outbound, h.inbound)\n\n c202 = Close(202, scid2)\n m.got_record(c202)\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.send_if_connected(Ack(202))\n ])\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.is_record_old(c202),\n mock.call.update_ack_watermark(202),\n mock.call.handle_close(scid2),\n ])\n clear_mock_calls(h.outbound, 
h.inbound)\n\n # Now we lose the connection. The Leader should tell the other side\n # that we're reconnecting.\n\n m.connector_connection_lost()\n self.assertEqual(h.send.mock_calls, [\n mock.call.send(\"dilate-2\",\n dict_to_bytes({\"type\": \"reconnect\"}))\n ])\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.stop_using_connection()\n ])\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.stop_using_connection()\n ])\n clear_mock_calls(h.send, h.inbound, h.outbound)\n\n # leader does nothing (stays in FLUSHING) until the follower acks by\n # sending RECONNECTING\n\n # inbound hints should be ignored during FLUSHING\n with mock.patch(\"wormhole._dilation.manager.parse_hint\",\n return_value=None) as ph:\n m.rx_HINTS({\"hints\": [1, 2, 3]})\n self.assertEqual(ph.mock_calls, []) # ignored\n\n c2 = mock.Mock()\n connector2 = mock.Mock(return_value=c2)\n with mock.patch(\"wormhole._dilation.manager.Connector\", connector2):\n # this triggers creation of a new Connector\n m.rx_RECONNECTING()\n self.assertEqual(h.send.mock_calls, [])\n self.assertEqual(connector2.mock_calls, [\n mock.call(b\"\\x00\" * 32, None, m, h.reactor, h.eq,\n False, # no_listen\n None, # tor\n None, # timing\n LEADER, roles.LEADER),\n ])\n self.assertEqual(c2.mock_calls, [mock.call.start()])\n clear_mock_calls(connector2, c2)\n\n self.assertEqual(h.inbound.mock_calls, [])\n self.assertEqual(h.outbound.mock_calls, [])\n\n # and a new connection should re-register with Inbound/Outbound,\n # which are responsible for re-sending unacked queued messages\n c3 = mock.Mock()\n m.connector_connection_made(c3)\n\n self.assertEqual(h.inbound.mock_calls, [mock.call.use_connection(c3)])\n self.assertEqual(h.outbound.mock_calls, [mock.call.use_connection(c3)])\n clear_mock_calls(h.inbound, h.outbound)\n\n def test_follower(self):\n m, h = make_manager(leader=False)\n\n m.start()\n self.assertEqual(h.send.mock_calls, [\n mock.call.send(\"dilate-0\",\n dict_to_bytes({\"type\": \"please\", \"side\": FOLLOWER}))\n ])\n clear_mock_calls(h.send)\n\n c = mock.Mock()\n connector = mock.Mock(return_value=c)\n with mock.patch(\"wormhole._dilation.manager.Connector\", connector):\n # receiving this PLEASE triggers creation of the Connector\n m.rx_PLEASE({\"side\": LEADER})\n self.assertEqual(h.send.mock_calls, [])\n self.assertEqual(connector.mock_calls, [\n mock.call(b\"\\x00\" * 32, None, m, h.reactor, h.eq,\n False, # no_listen\n None, # tor\n None, # timing\n FOLLOWER, roles.FOLLOWER),\n ])\n self.assertEqual(c.mock_calls, [mock.call.start()])\n clear_mock_calls(connector, c)\n\n # get connected, then lose the connection\n c1 = mock.Mock()\n m.connector_connection_made(c1)\n self.assertEqual(h.inbound.mock_calls, [mock.call.use_connection(c1)])\n self.assertEqual(h.outbound.mock_calls, [mock.call.use_connection(c1)])\n clear_mock_calls(h.inbound, h.outbound)\n\n # now lose the connection. 
As the follower, we don't notify the\n # leader, we just wait for them to notice\n m.connector_connection_lost()\n self.assertEqual(h.send.mock_calls, [])\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.stop_using_connection()\n ])\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.stop_using_connection()\n ])\n clear_mock_calls(h.send, h.inbound, h.outbound)\n\n # now we get a RECONNECT: we should send RECONNECTING\n c2 = mock.Mock()\n connector2 = mock.Mock(return_value=c2)\n with mock.patch(\"wormhole._dilation.manager.Connector\", connector2):\n m.rx_RECONNECT()\n self.assertEqual(h.send.mock_calls, [\n mock.call.send(\"dilate-1\",\n dict_to_bytes({\"type\": \"reconnecting\"}))\n ])\n self.assertEqual(connector2.mock_calls, [\n mock.call(b\"\\x00\" * 32, None, m, h.reactor, h.eq,\n False, # no_listen\n None, # tor\n None, # timing\n FOLLOWER, roles.FOLLOWER),\n ])\n self.assertEqual(c2.mock_calls, [mock.call.start()])\n clear_mock_calls(connector2, c2)\n\n # while we're trying to connect, we get told to stop again, so we\n # should abandon the connection attempt and start another\n c3 = mock.Mock()\n connector3 = mock.Mock(return_value=c3)\n with mock.patch(\"wormhole._dilation.manager.Connector\", connector3):\n m.rx_RECONNECT()\n self.assertEqual(c2.mock_calls, [mock.call.stop()])\n self.assertEqual(connector3.mock_calls, [\n mock.call(b\"\\x00\" * 32, None, m, h.reactor, h.eq,\n False, # no_listen\n None, # tor\n None, # timing\n FOLLOWER, roles.FOLLOWER),\n ])\n self.assertEqual(c3.mock_calls, [mock.call.start()])\n clear_mock_calls(c2, connector3, c3)\n\n m.connector_connection_made(c3)\n # finally if we're already connected, rx_RECONNECT means we should\n # abandon this connection (even though it still looks ok to us), then\n # when the attempt is finished stopping, we should start another\n\n m.rx_RECONNECT()\n\n c4 = mock.Mock()\n connector4 = mock.Mock(return_value=c4)\n with mock.patch(\"wormhole._dilation.manager.Connector\", connector4):\n m.connector_connection_lost()\n self.assertEqual(c3.mock_calls, [mock.call.disconnect()])\n self.assertEqual(connector4.mock_calls, [\n mock.call(b\"\\x00\" * 32, None, m, h.reactor, h.eq,\n False, # no_listen\n None, # tor\n None, # timing\n FOLLOWER, roles.FOLLOWER),\n ])\n self.assertEqual(c4.mock_calls, [mock.call.start()])\n clear_mock_calls(c3, connector4, c4)\n\n def test_mirror(self):\n # receive a PLEASE with the same side as us: shouldn't happen\n m, h = make_manager(leader=True)\n\n m.start()\n clear_mock_calls(h.send)\n e = self.assertRaises(ValueError, m.rx_PLEASE, {\"side\": LEADER})\n self.assertEqual(str(e), \"their side shouldn't be equal: reflection?\")\n\n def test_ping_pong(self):\n m, h = make_manager(leader=False)\n\n m.got_record(KCM())\n self.flushLoggedErrors(UnexpectedKCM)\n\n m.got_record(Ping(1))\n self.assertEqual(h.outbound.mock_calls,\n [mock.call.send_if_connected(Pong(1))])\n clear_mock_calls(h.outbound)\n\n m.got_record(Pong(2))\n # currently ignored, will eventually update a timer\n\n m.got_record(\"not recognized\")\n e = self.flushLoggedErrors(UnknownMessageType)\n self.assertEqual(len(e), 1)\n self.assertEqual(str(e[0].value), \"not recognized\")\n\n m.send_ping(3)\n self.assertEqual(h.outbound.mock_calls,\n [mock.call.send_if_connected(Pong(3))])\n clear_mock_calls(h.outbound)\n\n def test_subchannel(self):\n m, h = make_manager(leader=True)\n sc = object()\n\n m.subchannel_pauseProducing(sc)\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.subchannel_pauseProducing(sc)])\n 
clear_mock_calls(h.inbound)\n\n m.subchannel_resumeProducing(sc)\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.subchannel_resumeProducing(sc)])\n clear_mock_calls(h.inbound)\n\n m.subchannel_stopProducing(sc)\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.subchannel_stopProducing(sc)])\n clear_mock_calls(h.inbound)\n\n p = object()\n streaming = object()\n\n m.subchannel_registerProducer(sc, p, streaming)\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.subchannel_registerProducer(sc, p, streaming)])\n clear_mock_calls(h.outbound)\n\n m.subchannel_unregisterProducer(sc)\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.subchannel_unregisterProducer(sc)])\n clear_mock_calls(h.outbound)\n\n m.subchannel_closed(\"scid\", sc)\n self.assertEqual(h.inbound.mock_calls, [\n mock.call.subchannel_closed(\"scid\", sc)])\n self.assertEqual(h.outbound.mock_calls, [\n mock.call.subchannel_closed(\"scid\", sc)])\n clear_mock_calls(h.inbound, h.outbound)\n","sub_path":"src/wormhole/test/dilate/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":26246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"54582452","text":"import urllib.request\nfrom urllib.parse import urlencode,unquote\n\n\"\"\"\n注意:url连接,编码!英文不编码也可以实现,但为了统一规范,统一编码。\n\"\"\"\n\nWORD = {\"wd\":\"Python教程\"} # 编辑url连接参数字典\nprint(urlencode(WORD)) # url编码\nprint(unquote(urlencode(WORD))) # url解码\n\nURL = \"http://wwww.baidu.com\"\nURL = URL + \"/s?\"+urlencode(WORD) # 拼接新url\n\ndef download(url):\n print(\"访问url:\",url)\n response = urllib.request.urlopen(url)\n print(response.read().decode('utf-8'))\n\n\ndownload(URL)","sub_path":"《尹成python爬虫教程》学习笔记/1,urllib爬虫基础/6,get方法模拟百度搜索.py","file_name":"6,get方法模拟百度搜索.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"209548358","text":"\"\"\"\n Parameter Estimation tests #3.\n\n Robert Clewley, March 2005.\n\"\"\"\nfrom __future__ import print_function\n\n# PyDSTool imports\nfrom PyDSTool import *\nfrom PyDSTool.Toolbox.ParamEst import *\nfrom PyDSTool.Toolbox.neuro_data import *\nimport HH_model_Cintegrator as HH_model\n\n# Other imports\nfrom numpy.linalg import norm\nfrom time import clock\n\n# ----------------------------------------------------------------\n\ntdata = [0, 15]\n\npar_args_HH_goal = {'gna': 100, 'gk': 80, 'gl': 0.1,\n 'vna': 50, 'vk': -100, 'vl': -67,\n 'I': 1.3, 'C': 1.0}\nic_args_HH = {'v':-68, 'm': 0, 'h': 1, 'n': 0}\n\nHH_event_args = args(name='HH_zerothresh',\n eventtol=1e-4,\n eventdelay=1e-3,\n starttime=0,\n active=True)\nHH_thresh_ev = Events.makeZeroCrossEvent('v', 1, HH_event_args, ['v'],\n targetlang='c')\n\nHH_goal = HH_model.makeHHneuron('goalHH', par_args_HH_goal, ic_args_HH,\n evs=HH_thresh_ev,\n extra_terms='-0.04*(sin(9.1*t)*cos(2.6*t)+sin(5.1119*t+2))*(v-60)')\n\n# extra terms simulate low frequency \"noise\"\n\nHH_goal.set(tdata=tdata,\n algparams={'init_step':0.1})\n\ngoaltraj = HH_goal.compute('goalHHtraj')\n\n\nHH_spike_t = HH_goal.getEventTimes()['HH_zerothresh'][0]\nprint(\"HH spike time found at \", HH_spike_t)\n\n\n## Set up test HH model\npar_args_HH_test = {'gna': 100, 'gk': 80, 'gl': 0.12,\n 'vna': 50, 'vk': -100, 'vl': -70,\n 'I': 1.34, 'C': 1.0}\n# Note that I is not the same as that for goal, even though we're not\n# optimizing this parameter. Increasing I from original 1.3 to 1.34\n# causes slow convergence.\n\nDS_event_args = args(name='threshold',\n eventtol=1e-4,\n eventdelay=1e-3,\n starttime=0,\n active=True,\n term=False,\n precise=True)\nthresh_ev = Events.makeZeroCrossEvent('v', 1, DS_event_args, ['v'],\n targetlang='c')\nHH_test = HH_model.makeHHneuron('testHH', par_args_HH_test, ic_args_HH,\n thresh_ev)\n\nHH_test.set(tdata=[0,15], algparams={'atol':1e-9,'rtol':1e-8,\n 'min_step': 1e-5})\n\n\n\n## Set up external interface for the reference trajectory based on spike time\n\ntmesh = goaltraj.indepdomain.sample(dt=(tdata[1]-tdata[0])/100.,\n avoidendpoints=True)\n\n## DATA SPIKE ===================\n\n# quantitative feature\nsp_feat = spike_feature('spike_feat', pars=args(tol=0.6))\n\nspike_condition = condition({sp_feat: True})\n\n# one interface for judging the spike (uses a qual feature to process the ref\n# trajectory)\nis_spike = get_spike_data('is_spike', pars=args(height_tol=1.,\n fit_width_max=1.,\n weight=0,\n width_tol=10,\n noise_tol=0.5,\n thresh_pc=0.15,\n eventtol=1e-5,\n coord='v',\n tlo=tdata[0],\n thi=tdata[1]))\n\nclass ext_spike_iface(extModelInterface):\n def postprocess_test_traj(self, traj):\n # convert traj to individual spike time, value pair\n assert is_spike(traj)\n spike_time = is_spike.results.spike_time\n spike_height = is_spike.results.spike_val\n return numeric_to_traj([[spike_time], [spike_height]], self._trajname,\n ['sptime','spval'],\n indepvar=[0])\n\nspike_interface = ext_spike_iface(goaltraj,\n conditions=spike_condition,\n compatibleInterfaces=['int_spike_iface'])\n\n## DATA GEOM ===================\n\ngeom_feat = geom_feature('geom_feat', pars=args(tol=10,\n tmesh=tmesh,\n depvar='v'))\n\ngeom_condition = condition({geom_feat: True})\n\n# one interface for judging the shape of the V trajectory\nclass ext_geom_iface(extModelInterface):\n pass\n\ngeom_interface = ext_geom_iface(goaltraj,\n conditions=geom_condition,\n compatibleInterfaces=['int_geom_iface'])\n\n# Make model out of HH 
DS\nHH_test_model = embed(HH_test, ic_args_HH)\nHH_test_model.compute(trajname='orig')\n\nclass int_spike_iface(intModelInterface):\n def postprocess_test_traj(self, traj):\n evpts = traj.getEvents('threshold')\n # catch \"broken\" output and penalize\n if evpts is None:\n ev_t = [300]\n ev_v = [300]\n elif len(evpts) != 1:\n ev_t = [300]\n ev_v = [300]\n else:\n ev_t = evpts['t']\n ev_v = evpts['v']\n return numeric_to_traj([ev_t, ev_v], self._trajname,\n ['sptime', 'spval'],\n indepvar=[0])\n\nclass int_geom_iface(intModelInterface):\n def postprocess_test_traj(self, traj):\n # use tmesh of data points only (may not be the same mesh as was used by\n # this model traj, that's why we have to resample\n varray = traj(tmesh)['v']\n return numeric_to_traj([varray], self._trajname, ['v'],\n indepvar=tmesh)\n\n\npest_context = context([ (spike_interface, int_spike_iface),\n (geom_interface, int_geom_iface) ])\n\nprint(\"Feature evaluation on initial set-up: \", pest_context.evaluate(HH_test_model))\nprint(\"geom feat residual: \", norm(geom_feat.metric.results))\npts1=geom_feat.ref_traj(tmesh,coords=['v'])\npts2=HH_test_model('test_iface_traj', tmesh, coords=['v'])\n#plot(tmesh, pts1['v'])\n#plot(tmesh, pts2['v'])\nprint(\"\\nResidual norm before feature weighting:\")\nprint(norm(pest_context.residual(HH_test_model)))\n#pest_context.set_weights({geom_interface: 0.005})\npest_context.set_weights({geom_interface: 0.005, spike_interface: 0.25})\nprint(\"Residual norm after feature weighting:\")\nprint(norm(pest_context.residual(HH_test_model)))\n\n## Parameter estimation\nprint('\\nEstimating pars gl and vl for fit')\nprint('Goal values are vl =', par_args_HH_goal['vl'], ', gl = ', \\\n par_args_HH_goal['gl'], ' ...')\n\npnames = ['vl', 'gl']\nparscales = {'vl': 0.1, 'gl': 0.01}\nparseps = {'vl': 3e-2, 'gl': 1e-3}\npest1, opt = make_opt(pnames, residual_fn_context, HH_test_model, pest_context,\n parscales=parscales, parseps=parseps)\n\n#pest_pars = LMpest(freeParams=['vl', 'gl'],\n# testModel=HH_test_model,\n# context=pest_context,\n# verbose_level=2\n# )\n\nstart = clock()\n#pestData_par = pest_pars.run(parDict={'ftol':1e-4,\n# 'xtol':1e-4,\n# },\n# verbose=True)\n\nopt.iterate()\npest_context.set_weights({geom_interface: 0.1, spike_interface: 0.1})\npest2, opt = make_opt(pnames, residual_fn_context, HH_test_model, pest_context,\n parscales=parscales, parseps=parseps)\nopt.iterate()\n\nprint('... 
finished in %.3f seconds\\n' % (clock()-start))\n\n\nlog_ix = pest2.find_logs()[0]\nsol_pars = pest2.log[log_ix].pars\nHH_test_model.set(pars=sol_pars) #pestData_par['pars_sol'])\nprint(\"Feature evaluation on solution set-up: \", \\\n pest_context.evaluate(HH_test_model))\nprint(\"geom feat residual: \", norm(geom_feat.metric.results))\n\n# solution trajectory involving voltage happens to be the first of the\n# two trajectories stored in each log (one for each model interface, and\n# stored in order of the names of the interfaces).\nsol_traj = pest2.log[log_ix].trajectories[0]\n\n## Finish preparing plots\nprint('\\nPreparing plots')\nfigure()\ndisp_dt = 0.05\nplotData_orig = HH_test_model.sample('orig', ['v'], disp_dt, precise=True)\nplotData_goal = goaltraj.sample(['v'], disp_dt, precise=True)\nplotData_par = sol_traj.sample(['v'])\n\nplt.ylabel('v')\nplt.xlabel('t')\ngoalline=plot(plotData_goal['t'], plotData_goal['v'], label=\"v goal\")\norigline = plot(plotData_orig['t'], plotData_orig['v'], label=\"v initial\")\nestline = plot(plotData_par['t'], plotData_par['v'], label='v estimated')\n\nplt.legend(loc='lower left')\nshow()\n","sub_path":"examples/pest_test3_Cintegrator.py","file_name":"pest_test3_Cintegrator.py","file_ext":"py","file_size_in_byte":8371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"300458771","text":"# 机器人移动,地上有m行和n列的方格,机器人从0,0开始移动,每次只能移动上下左右一格\n# 不能进入行左标+列左标大于k的格子,例如k=18,不能进入方格(35,38),因为3+5+3+8=19\n# 机器人能够到达多少个格子\n\n\ndef moving_count(k, rows, cols):\n if k < 0 or rows < 0 or cols < 0:\n return False\n\n visited = [0] * (rows * cols)\n count = moving_count_core(k, rows, cols, 0, 0, visited)\n return count\n\n\ndef moving_count_core(k, rows, cols, row, col, visited):\n count = 0\n if check(k, rows, cols, row, col, visited):\n visited[row * cols + col] = 1\n count = 1 + moving_count_core(k, rows, cols, row, col - 1, visited) + \\\n moving_count_core(k, rows, cols, row, col + 1, visited) + \\\n moving_count_core(k, rows, cols, row - 1, col, visited) + \\\n moving_count_core(k, rows, cols, row + 1, col, visited)\n return count\n\n\ndef check(k, rows, cols, row, col, visited):\n if rows > row >= 0 and cols > col >= 0 and get_digit_sum(row) + get_digit_sum(col) <= k and \\\n visited[row * cols + col] == 0:\n return True\n return False\n\n\ndef get_digit_sum(n):\n s = 0\n while n > 0:\n s += n % 10\n n //= 10\n return s\n\n\nif __name__ == '__main__':\n print(moving_count(5, 5, 5))\n\n","sub_path":"剑指offer/thirteen.py","file_name":"thirteen.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"310477564","text":"\"\"\"Here you can control the config parameters to run Kytos controller.\n\nBasically you can use a config file (-c option) and use arguments on command\nline. If you specify a config file, then and option configured inside this file\nwill be overridden by the option on command line.\n\"\"\"\n\nimport os\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom configparser import ConfigParser\n\nfrom kytos.core._metadata import __version__\n\nif 'VIRTUAL_ENV' in os.environ:\n BASE_ENV = os.environ['VIRTUAL_ENV']\nelse:\n BASE_ENV = '/'\n\n\nclass KytosConfig():\n \"\"\"KytosConfig class handle settings of Kytos.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor of KytosConfig.\n\n The contructor set defaults parameters that can be used by KytosConfig.\n \"\"\"\n self.options = {}\n conf_parser = ArgumentParser(add_help=False)\n\n conf_parser.add_argument(\"-c\", \"--conf\",\n help=\"Specify a config file\",\n metavar=\"FILE\")\n\n parser = ArgumentParser(prog='kytosd',\n parents=[conf_parser],\n formatter_class=RawDescriptionHelpFormatter,\n description=__doc__)\n\n parser.add_argument('-v', '--version',\n action='version',\n version=\"kytosd %s\" % __version__)\n\n parser.add_argument('-D', '--debug',\n action='store_true',\n help=\"Run in debug mode\")\n\n parser.add_argument('-f', '--foreground',\n action='store_true',\n help=\"Run in foreground (ctrl+c to stop)\")\n\n parser.add_argument('-l', '--listen',\n action='store',\n help=\"IP/Interface to be listened\")\n\n parser.add_argument('-n', '--napps',\n action='store',\n help=\"Specify the napps directory\")\n\n parser.add_argument('-P', '--port',\n action='store',\n help=\"Port to be listened\")\n\n parser.add_argument('-p', '--pidfile',\n action='store',\n help=\"Specify the PID file to save.\")\n\n parser.add_argument('-w', '--workdir',\n action='store',\n help=\"Specify the working directory\")\n\n self.conf_parser, self.parser = conf_parser, parser\n self.parse_args()\n\n def parse_args(self):\n \"\"\"Get the command line options and update kytos settings.\n\n When installed via pip, defaults values are:\n\n defaults = {'pidfile': '/var/run/kytosd.pid',\n 'workdir': '/var/lib/kytos',\n 'napps': '/var/lib/kytos/napps/',\n 'conf': '/etc/kytos/kytos.conf',\n 'logging': '/etc/kytos/logging.ini',\n 'listen': '0.0.0.0',\n 'port': 6633,\n 'foreground': False,\n 'debug': False}\n \"\"\"\n defaults = {'pidfile': os.path.join(BASE_ENV, 'var/run/kytosd.pid'),\n 'workdir': os.path.join(BASE_ENV, 'var/lib/kytos'),\n 'napps': os.path.join(BASE_ENV, 'var/lib/kytos/napps/'),\n 'installed_napps': os.path.join(BASE_ENV,\n 'var/lib/kytos/napps/',\n '.installed'),\n 'conf': os.path.join(BASE_ENV, 'etc/kytos/kytos.conf'),\n 'logging': os.path.join(BASE_ENV, 'etc/kytos/logging.ini'),\n 'listen': '0.0.0.0',\n 'port': 6633,\n 'foreground': False,\n 'debug': False}\n\n options, argv = self.conf_parser.parse_known_args()\n\n config = ConfigParser()\n result = config.read([options.conf or defaults.get('conf')])\n\n if result:\n defaults.update(dict(config.items(\"daemon\")))\n else:\n print('There is no config file.')\n exit(-1)\n\n self.parser.set_defaults(**defaults)\n\n if 'test' in argv:\n argv.pop(argv.index('test'))\n\n self.options['daemon'] = self.parser.parse_args(argv)\n","sub_path":"kytos/core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"542377976","text":"import json\nimport logging\n\nfrom kubeflow.testing import ks_util, test_util, util\nfrom kubeflow.tf_operator import test_runner, tf_job_client\nfrom kubernetes import client as k8s_client\n\nREPLICA_RESTART_POLICY_ALWAYS_COMPONENT_NAME = \"replica_restart_policy_always\"\nREPLICA_RESTART_POLICY_ONFAILURE_COMPONENT_NAME = \"replica_restart_policy_onfailure\"\nREPLICA_RESTART_POLICY_NEVER_COMPONENT_NAME = \"replica_restart_policy_never\"\nREPLICA_RESTART_POLICY_EXITCODE_COMPONENT_NAME = \"replica_restart_policy_exitcode\"\n\n\nclass ReplicaRestartPolicyTests(test_util.TestCase):\n\n def __init__(self, args):\n namespace, name, env = test_runner.parse_runtime_params(args)\n self.app_dir = args.app_dir\n self.env = env\n self.namespace = namespace\n self.tfjob_version = args.tfjob_version\n self.params = args.params\n super(ReplicaRestartPolicyTests, self).__init__(\n class_name=\"ReplicaRestartPolicyTests\", name=name)\n\n def run_tfjob_with_replica_restart_policy(self, component,\n replica_restart_policy, exit_code):\n api_client = k8s_client.ApiClient()\n\n # Setup the ksonnet app\n ks_util.setup_ks_app(self.app_dir, self.env, self.namespace, component,\n self.params)\n\n # Create the TF job\n ks_cmd = ks_util.get_ksonnet_cmd(self.app_dir)\n util.run([ks_cmd, \"apply\", self.env, \"-c\", component], cwd=self.app_dir)\n logging.info(\"Created job %s in namespaces %s\", self.name, self.namespace)\n\n # Wait for the job to either be in Running state or a terminal state\n logging.info(\"Wait for conditions Running, Succeeded, or Failed\")\n results = tf_job_client.wait_for_condition(\n api_client,\n self.namespace,\n self.name, [\"Running\", \"Succeeded\", \"Failed\"],\n version=self.tfjob_version,\n status_callback=tf_job_client.log_status)\n logging.info(\"Current TFJob:\\n %s\", json.dumps(results, indent=2))\n\n if replica_restart_policy == \"Always\" and exit_code == 0:\n res = tf_job_client.terminate_and_verify_start_time(\n api_client, self.namespace, self.name, \"ps\", 0, exit_code, True)\n\n elif replica_restart_policy == \"Always\" and exit_code == 1:\n res = tf_job_client.terminate_and_verify_start_time(\n api_client, self.namespace, self.name, \"ps\", 0, exit_code, True)\n\n elif replica_restart_policy == \"OnFailure\" and exit_code == 1:\n res = tf_job_client.terminate_and_verify_start_time(\n api_client, self.namespace, self.name, \"ps\", 0, exit_code, True)\n\n elif replica_restart_policy == \"OnFailure\" and exit_code == 0:\n res = tf_job_client.terminate_and_verify_start_time(\n api_client, self.namespace, self.name, \"ps\", 0, exit_code, False)\n\n elif replica_restart_policy == \"Never\" and exit_code == 1:\n res = tf_job_client.terminate_and_verify_start_time(\n api_client, self.namespace, self.name, \"ps\", 0, exit_code, False)\n\n elif replica_restart_policy == \"Never\" and exit_code == 0:\n res = tf_job_client.terminate_and_verify_start_time(\n api_client, self.namespace, self.name, \"ps\", 0, exit_code, False)\n\n elif replica_restart_policy == \"ExitCode\" and exit_code == 1:\n res = tf_job_client.terminate_and_verify_start_time(\n api_client, self.namespace, self.name, \"ps\", 0, exit_code, False)\n\n else:\n res = tf_job_client.terminate_and_verify_start_time(\n api_client, self.namespace, self.name, \"ps\", 0, exit_code, True)\n\n if res is False:\n self.failure = \"Job {0} in namespace {1} with restart policy {2} failed test \\\n with exit_code {3}\".format(self.name, self.namespace,\n replica_restart_policy, exit_code)\n 
logging.error(self.failure)\n      return\n\n    # Delete the TFJob.\n    tf_job_client.delete_tf_job(\n      api_client, self.namespace, self.name, version=self.tfjob_version)\n    logging.info(\"Waiting for job %s in namespaces %s to be deleted.\",\n                 self.name, self.namespace)\n    tf_job_client.wait_for_delete(\n      api_client,\n      self.namespace,\n      self.name,\n      self.tfjob_version,\n      status_callback=tf_job_client.log_status)\n\n  # Verify that the pod is restarted even after the container exits with success.\n  # We terminate PS with exit_code=0, and verify it is restarted.\n  def test_restart_always_exit_code_0(self):\n    return self.run_tfjob_with_replica_restart_policy(\n      REPLICA_RESTART_POLICY_ALWAYS_COMPONENT_NAME + \"_\" + self.tfjob_version,\n      \"Always\", 0)\n\n  # Verify that the pod is restarted after the container exits with 1.\n  # We terminate PS with exit_code=1, and verify it is restarted.\n  def test_restart_always_exit_code_1(self):\n    return self.run_tfjob_with_replica_restart_policy(\n      REPLICA_RESTART_POLICY_ALWAYS_COMPONENT_NAME + \"_\" + self.tfjob_version,\n      \"Always\", 1)\n\n  # Verify that the pod is restarted after failure.\n  # We terminate PS with exit_code=1, and verify it is restarted.\n  def test_restart_onfailure_exit_code_1(self):\n    return self.run_tfjob_with_replica_restart_policy(\n      REPLICA_RESTART_POLICY_ONFAILURE_COMPONENT_NAME + \"_\" +\n      self.tfjob_version, \"OnFailure\", 1)\n\n  # Verify that the pod is not restarted after success.\n  # We terminate PS with exit_code=0, and verify it is not restarted.\n  def test_restart_onfailure_exit_code_0(self):\n    return self.run_tfjob_with_replica_restart_policy(\n      REPLICA_RESTART_POLICY_ONFAILURE_COMPONENT_NAME + \"_\" +\n      self.tfjob_version, \"OnFailure\", 0)\n\n  # Verify that the pod is never restarted.\n  # We terminate PS with exit_code=1, and verify it is not restarted.\n  def test_restart_never_exit_code_1(self):\n    return self.run_tfjob_with_replica_restart_policy(\n      REPLICA_RESTART_POLICY_NEVER_COMPONENT_NAME + \"_\" + self.tfjob_version,\n      \"Never\", 1)\n\n  # Verify that the pod is never restarted.\n  # We terminate PS with exit_code=0, and verify it is not restarted.\n  def test_restart_never_exit_code_0(self):\n    return self.run_tfjob_with_replica_restart_policy(\n      REPLICA_RESTART_POLICY_NEVER_COMPONENT_NAME + \"_\" + self.tfjob_version,\n      \"Never\", 0)\n\n  # Verify that the pod is not restarted after a permanent error (1-127).\n  # We terminate PS with exit_code=1, and verify its phase becomes Failed.\n  def test_restart_exitcode_permanent_error(self):\n    return self.run_tfjob_with_replica_restart_policy(\n      REPLICA_RESTART_POLICY_EXITCODE_COMPONENT_NAME + \"_\" + self.tfjob_version,\n      \"ExitCode\", 1)\n\n  # Verify that the pod is restarted after a retryable error.\n  # We terminate PS with exit_code=130, and verify it is restarted.\n  def test_restart_exitcode_retryable_error(self):\n    return self.run_tfjob_with_replica_restart_policy(\n      REPLICA_RESTART_POLICY_EXITCODE_COMPONENT_NAME + \"_\" + self.tfjob_version,\n      \"ExitCode\", 130)\n\n\nif __name__ == \"__main__\":\n  test_runner.main(module=__name__)\n","sub_path":"py/kubeflow/tf_operator/replica_restart_policy_tests.py","file_name":"replica_restart_policy_tests.py","file_ext":"py","file_size_in_byte":6875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"380158096","text":"from django.core.mail import EmailMessage\nfrom django.shortcuts import redirect, reverse\nfrom django.views.generic.edit import UpdateView\nfrom django.template.loader import render_to_string\n\nfrom .models import Povabilo\n\ndef pošlji_obvestila(dogodek, emails):\n\temail = EmailMessage(\n\t\tsubject=\"Povabilo na dogodek: %s\" % dogodek,\n\t\tbody=render_to_string(\"dogodki/mail/vabilo.txt\", {\"dogodek\": dogodek}),\n\t\tfrom_email=None, # Use default\n\t\tto=[],\n\t\tbcc=emails\n\t)\n\t\n\temail.send()\n\n\tPovabilo.objects.filter(dogodek=dogodek, uporabnik__email__in=emails).update(email_poslan=True)\n\nclass FormsetMixin:\n\tobject = None\n\n\tdef get(self, request, *args, **kwargs):\n\t\tif isinstance(self, UpdateView):\n\t\t\tself.object = self.get_object()\n\n\t\tform_class = self.get_form_class()\n\t\tform = self.get_form(form_class)\n\n\t\tformset_class = self.get_formset_class()\n\t\tformset = self.get_formset(formset_class)\n\n\t\treturn self.render_to_response(self.get_context_data(form=form, formset=formset))\n\n\tdef post(self, request, *args, **kwargs):\n\t\tif isinstance(self, UpdateView):\n\t\t\tself.object = self.get_object()\n\n\t\tform_class = self.get_form_class()\n\t\tform = self.get_form(form_class)\n\n\t\tformset_class = self.get_formset_class()\n\t\tformset = self.get_formset(formset_class)\n\n\t\tif form.is_valid() and formset.is_valid():\n\t\t\treturn self.form_valid(form, formset)\n\t\telse:\n\t\t\treturn self.form_invalid(form, formset)\n\n\tdef get_formset_class(self):\n\t\treturn self.formset_class\n\n\tdef get_formset(self, formset_class):\n\t\treturn formset_class(**self.get_formset_kwargs())\n\n\tdef get_formset_kwargs(self):\n\t\tkwargs = {\n\t\t\t\"instance\": self.object\n\t\t}\n\t\tif self.request.method == \"POST\":\n\t\t\tkwargs[\"data\"] = self.request.POST\n\t\treturn kwargs\n\n\tdef form_valid(self, form, formset):\n\t\tself.object = form.save()\n\t\tformset.instance = self.object\n\t\tformset.save()\n\t\treturn redirect(self.object.get_absolute_url())\n\n\tdef form_invalid(self, form, formset):\n\t\treturn self.render_to_response(self.get_context_data(form=form, formset=formset))\n","sub_path":"dogodki_app/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"528123160","text":"from .test_case import TestCase\nfrom .executioner import execute\nfrom .csv_reader import CSVReader\nfrom .anti_plag import AntiPlag\nfrom .progressbar import ProgressBar\nfrom time import time\nimport inspect\nfrom csv import writer\n\n\ndef _str(x):\n if type(x) is str:\n return '\\'' + x + '\\''\n return str(x)\n\nclass Challenge(object):\n\n def __init__(self, name=None, description='', input_var_name=None, output_var_name=None, method_name=None, timeout=None):\n self.name = name\n self.description = description\n self.test_cases = []\n if method_name:\n assert not output_var_name, 'Both output_var_name and method_name were specified.'\n else:\n assert None not in [input_var_name, output_var_name], 'Either input_var_name and output_var_name or method_name should be specified.'\n self.input_var_name = input_var_name\n self.output_var_name = output_var_name\n self.method_name = method_name\n self.timeout = None\n\n def serialize(self):\n config = {}\n config['name'] = self.name\n config['description'] = self.description\n config['timeout'] = self.timeout\n config['test_cases'] = [tc.serialize() for tc in self.test_cases]\n return config\n\n @classmethod\n def deserialize(cls, config):\n tcs = config.pop('test_cases')\n challenge = cls(**config)\n deserialize = TestCase.deserialize\n [challenge.add_test_case(deserialize(tc)) for tc in tcs]\n return challenge\n\n def add_test_case(self, *test_case):\n if len(test_case) == 1:\n assert type(test_case[0]) == TestCase, 'Required TestCase object. Received ' + str(type(test_case[0]))\n test_case = test_case[0]\n else:\n test_case = TestCase(*test_case)\n self.test_cases.append(test_case)\n\n def _run(self, code, test_case):\n glbls = {}\n input_var_name = self.input_var_name\n output_var_name = self.output_var_name\n method_name = self.method_name\n if input_var_name:\n glbls[input_var_name] = test_case.input\n if output_var_name:\n glbls[output_var_name] = None\n code += '\\nglobals()[{}] = {}'.format(output_var_name, output_var_name)\n elif method_name:\n extra_code = \"\"\"\nimport inspect\nargs = inspect.getargspec({}).args\nif len(args) == 0:\n globals()['output'] = {}()\nelif len(args) == 1:\n globals()['output'] = {}({})\nelse:\n raise Exception('Invalid method signature.')\n\"\"\"\n code += '\\n'\n code += extra_code.format(method_name, method_name, method_name, _str(test_case.input))\n timeout = self.timeout\n if timeout is None:\n timeout = test_case.timeout\n out_dict, time_taken, timed_out = execute(code, glbls, timeout)\n error = None\n if self.output_var_name is not None:\n if self.output_var_name not in out_dict:\n if 'error' not in out_dict:\n error = 'Out variable not set.'\n out = None\n else:\n out = out_dict[self.output_var_name]\n else:\n out = out_dict.get('output')\n passed = out == test_case.output\n if error is None:\n error = out_dict.get('error', None)\n if error:\n passed = False\n if timed_out:\n passed = False\n return {'test_case': test_case.name, 'passed' : passed, 'time_taken' : time_taken, 'error' : error, \n 'timed_out' : timed_out, 'output' : out, 'expected_output' : test_case.output}\n\n\n def run(self, code):\n _run = self._run\n results = []\n pbar = ProgressBar(len(self.test_cases))\n for tc in self.test_cases:\n results.append(self._run(code, tc))\n pbar.update()\n return results\n\n def run_csv(self, input_file, output_file='results.csv', individual_results=True, anti_plag=True):\n\n with open(output_file, 'w') as f:\n csv = CSVReader(input_file)\n csv2 = writer(f)\n data 
= csv.data\n header = csv.header\n header.append('Passed')\n header.append('Run time')\n header.append('Error')\n header.append('Plagiarized')\n csv2.writerow(header)\n csv2.writerow()\n name_col = csv.name_col\n code_col = csv.code_col\n if anti_plag:\n ap = AntiPlag()\n codes = [x[code_col] for x in data]\n ap_results = ap(codes)\n bad_bois = set()\n for group in ap_results:\n for boi in group:\n bad_bois.add(boi)\n for i, x in enumerate(data):\n name = x[name_col]\n code = x[code_col]\n print(\"Running tests for user {} ...\".format(name))\n results = self.run(code)\n row = data[i][:]\n is_bad_boi = i in bad_bois\n row.append('Yes')\n if is_bad_boi:\n row[-1] = 'No'\n else:\n for tc in results:\n if not tc['passed']:\n row[-1] = 'No'\n break\n row.append(sum([r['time_taken'] for r in results]))\n row.append('')\n for res in results:\n if res['error'] is None:\n if res['timed_out']:\n row[-1] = 'Timed out'\n break\n else:\n row[-1] = res['error']\n if is_bad_boi:\n row.append('Yes')\n else:\n row.append('No')\n csv2.writerow(row)\n","sub_path":"rankerboi/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"524911061","text":"################################################################################\r\n# opus_support.py\r\n#\r\n# General routines that are shared by the OPUS import pipeline and the OPUS\r\n# Django backend and thus can't be in either directory.\r\n#\r\n# These are generally related to conversion of values to/from various text\r\n# formats.\r\n#\r\n# NOTE: THIS FILE HAS 100% TEST COVERAGE AND ANY FUTURE MODIFICATIONS MUST\r\n# MAINTAIN THIS LEVEL OF COVERAGE.\r\n################################################################################\r\n\r\nimport math\r\nimport numpy as np\r\nimport re\r\nimport unittest\r\n\r\ntry: # pragma: no cover\r\n import julian # From pds-tools\r\nexcept ImportError: # pragma: no cover\r\n # If we can't find julian, we need to add the directory for the pds-tools\r\n # repo to our search path. This only happens when running tests.\r\n import os\r\n import sys\r\n PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))\r\n PDS_OPUS_ROOT = os.path.dirname(PROJECT_ROOT)\r\n sys.path.insert(0, PDS_OPUS_ROOT) # So we can import opus_secrets\r\n from opus_secrets import PDS_TOOLS_PATH\r\n sys.path.insert(0, PDS_TOOLS_PATH)\r\n import julian\r\n\r\nDEG_RAD = np.degrees(1)\r\n\r\n# We limit the available times because julian doesn't support parsing dates\r\n# outside of this range\r\nMIN_TIME = -31556908800 # 1000-01-01T00:00:00\r\nMAX_TIME = 31556995236 # 2999-12-31T23:59:59\r\n\r\n################################################################################\r\n# General routines for handling a spacecraft clock where:\r\n# - there are exactly two fields\r\n# - the clock partition is always one\r\n################################################################################\r\n\r\ndef _parse_two_field_sclk(sclk, ndigits, sep, modval, scname):\r\n \"\"\"Convert a two-field clock string to a numeric value.\r\n\r\n Input:\r\n sclk the spacecraft clock string.\r\n ndigits the maximum number of digits in the leading field.\r\n sep the character that separates the fields, typically a colon\r\n or a period.\r\n modval the modulus value of the second field.\r\n scname name of the spacecraft, used for error messages.\r\n \"\"\"\r\n # Check the partition number before ignoring it\r\n parts = sclk.split('/')\r\n if len(parts) > 2:\r\n raise ValueError('Invalid %s clock format, ' % scname +\r\n 'extraneous slash: ' + sclk)\r\n\r\n if len(parts) == 2:\r\n if parts[0].strip() != '1':\r\n raise ValueError('%s partition number must be one: ' % scname +\r\n sclk)\r\n\r\n sclk = parts[1]\r\n\r\n # Interpret the fields\r\n parts = sclk.split(sep)\r\n # if len(parts) == 1:\r\n # raise ValueError('Invalid %s clock format, ' % scname +\r\n # 'no field separator: ' + sclk)\r\n\r\n if len(parts) > 2:\r\n raise ValueError('More than two %s clock fields: ' % scname + sclk)\r\n\r\n if len(parts) != 1:\r\n # The second field must have the required number of digits\r\n ndigits2 = len(str(modval - 1))\r\n\r\n while len(parts[1]) < ndigits2:\r\n parts[1] = parts[1] + '0'\r\n\r\n # Make sure both fields are integers\r\n ints = []\r\n try:\r\n for part in parts:\r\n ints.append(int(part))\r\n except ValueError:\r\n raise ValueError('%s clock fields must be integers: ' % scname + sclk)\r\n\r\n # Append fields to make two\r\n if len(ints) == 1:\r\n ints.append(0)\r\n\r\n # Check fields for valid ranges\r\n if (ints[0] < 0 or len(parts[0]) > ndigits or\r\n ints[1] < 0 or ints[1] >= modval):\r\n raise ValueError('%s clock trailing field out of range ' % scname 
+\r\n '0-%d: ' % (modval-1) + sclk)\r\n\r\n return ints[0] + ints[1]/float(modval)\r\n\r\ndef _format_two_field_sclk(value, ndigits, sep, modval, scname):\r\n \"\"\"Convert a number into a valid spacecraft clock string.\r\n\r\n Input:\r\n sclk the spacecraft clock string.\r\n ndigits the number of digits in the leading field. Leading zeros\r\n will be used for padding\r\n sep the character that separates the fields, typically a colon\r\n or a period.\r\n modval the modulus value of the second field.\r\n scname name of the spacecraft, used for error messages.\r\n \"\"\"\r\n # Extract fields\r\n hours = int(value)\r\n value -= hours\r\n value *= modval\r\n\r\n # Round off minutes\r\n minutes = int(value + 0.5)\r\n if minutes >= modval:\r\n minutes -= modval\r\n hours += 1\r\n\r\n # Format\r\n ndigits2 = len(str(modval - 1))\r\n fmt = '%0' + str(ndigits) + 'd' + sep + '%0' + str(ndigits2) + 'd'\r\n return fmt % (hours, minutes)\r\n\r\n\r\n################################################################################\r\n################################################################################\r\n# GALILEO\r\n################################################################################\r\n# Conversion routines for the Galileo spacecraft clock.\r\n#\r\n# The clock has two fields separated by a period. The first field has eight\r\n# digits with leading zeros if necessary. The second is a two-digit number\r\n# 0-90. The partition is always 1.\r\n#\r\n# According to the SCLK kernel for Galileo, there are additional subfields.\r\n# However, the first two are all we need for the Galilieo images currently in\r\n# our archive.\r\n################################################################################\r\n\r\ndef parse_galileo_sclk(sclk, **kwargs):\r\n \"\"\"Convert a Galileo clock string to a numeric value.\"\"\"\r\n return _parse_two_field_sclk(sclk, 8, '.', 91, 'Galileo')\r\n\r\ndef format_galileo_sclk(value, **kwargs):\r\n \"\"\"Convert a number into a valid Galileo clock string.\"\"\"\r\n return _format_two_field_sclk(value, 8, '.', 91, 'Galileo')\r\n\r\nclass GalileoTest(unittest.TestCase):\r\n def test_parse_extra_slash(self):\r\n \"\"\"Galileo parse: Two slashes\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1/2/03464059.00')\r\n\r\n def test_parse_bad_partition(self):\r\n \"\"\"Galileo parse: Partition number other than 1\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('2/03464059.00')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('0/03464059.00')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1.0/03464059.00')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('-1/03464059.00')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('a/03464059.00')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('/03464059.00')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1/03464059.00.00')\r\n\r\n def test_parse_bad_value(self):\r\n \"\"\"Galileo parse: Bad sclk value\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1/a')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1/0123456a')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1/03464059.0a')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1/03464059.-1')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1/03464059.91')\r\n with self.assertRaises(ValueError):\r\n parse_galileo_sclk('1/03464059.450')\r\n with 
self.assertRaises(ValueError):\r\n            parse_galileo_sclk('1/034640590.00')\r\n        with self.assertRaises(ValueError):\r\n            parse_galileo_sclk('.00')\r\n        with self.assertRaises(ValueError):\r\n            parse_galileo_sclk('1/.00')\r\n        with self.assertRaises(ValueError):\r\n            parse_galileo_sclk('1/-1.00')\r\n        with self.assertRaises(ValueError):\r\n            parse_galileo_sclk('1/-34640590.00')\r\n\r\n    def test_parse_good_sclk(self):\r\n        \"\"\"Galileo parse: Good sclk format\"\"\"\r\n        self.assertEqual(parse_galileo_sclk('1'), 1)\r\n        self.assertEqual(parse_galileo_sclk('1.'), 1)\r\n        self.assertEqual(parse_galileo_sclk('1.0'), 1)\r\n        self.assertEqual(parse_galileo_sclk('1.00'), 1)\r\n        self.assertEqual(parse_galileo_sclk('1/03464059.00'), 3464059)\r\n        self.assertAlmostEqual(parse_galileo_sclk('1/03464059.90'), 3464059.989010989)\r\n        self.assertAlmostEqual(parse_galileo_sclk('1/3464059.90'), 3464059.989010989)\r\n        self.assertAlmostEqual(parse_galileo_sclk('1/3464059.9'), 3464059.989010989)\r\n        self.assertAlmostEqual(parse_galileo_sclk('03464059.90'), 3464059.989010989)\r\n\r\n    def test_format_good_sclk(self):\r\n        \"\"\"Galileo format: Good value\"\"\"\r\n        self.assertEqual(format_galileo_sclk(0), '00000000.00')\r\n        self.assertEqual(format_galileo_sclk(1234), '00001234.00')\r\n        self.assertEqual(format_galileo_sclk(12345678), '12345678.00')\r\n        self.assertEqual(format_galileo_sclk(1234.989010989), '00001234.90')\r\n        self.assertEqual(format_galileo_sclk(99999999.989010989), '99999999.90')\r\n        self.assertEqual(format_galileo_sclk(99999999.995010989), '100000000.00')\r\n\r\n\r\n################################################################################\r\n################################################################################\r\n# NEW HORIZONS\r\n################################################################################\r\n# Conversion routines for the New Horizons spacecraft clock.\r\n#\r\n# The clock has two fields separated by a colon. The first field is a ten-digit\r\n# number with leading zeros if necessary. The second is a five-digit number\r\n# 0-49999. The partition is 1 through 0139810086 and 3 starting at 0168423778.\r\n# No observations in between are archived at the RMS Node. Note that the clock\r\n# count does not roll over between partitions.\r\n################################################################################\r\n\r\ndef parse_new_horizons_sclk(sclk, **kwargs):\r\n    \"\"\"Convert a New Horizons clock string to a numeric value.\"\"\"\r\n    original_sclk = sclk\r\n\r\n    # Check for partition number\r\n    parts = sclk.partition('/')\r\n    if parts[1]: # a slash if present, otherwise an empty string\r\n        if parts[0] not in ('1', '3'):\r\n            raise ValueError('New Horizons partition number must be 1 or 3: ' +\r\n                             sclk)\r\n        sclk = parts[2]\r\n\r\n    # Convert to numeric value\r\n    value = _parse_two_field_sclk(sclk, 10, ':', 50000, 'New Horizons')\r\n\r\n    # Validate the partition number if any\r\n    if parts[1]:\r\n        if ((parts[0] == '3' and value < 150000000.) 
or\r\n (parts[0] == '1' and value > 150000000.)):\r\n raise ValueError('New Horizons partition number is invalid: ' +\r\n original_sclk)\r\n\r\n return value\r\n\r\ndef format_new_horizons_sclk(value, **kwargs):\r\n \"\"\"Convert a number into a valid New Horizons clock string.\"\"\"\r\n return _format_two_field_sclk(value, 10, ':', 50000, 'New Horizons')\r\n\r\nclass NewHorizonsTest(unittest.TestCase):\r\n def test_parse_extra_slash(self):\r\n \"\"\"NewHorizons parse: Two slashes\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/2/0003103485:49000')\r\n\r\n def test_parse_bad_partition(self):\r\n \"\"\"NewHorizons parse: Partition number other than 1 or 3\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('4/0003103485:49000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('2/0003103485:49000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('0/0003103485:49000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1.0/0003103485:49000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('-1/0003103485:49000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('a/0003103485:49000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('/0003103485:49000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/0003103485:49000:49000')\r\n\r\n def test_parse_bad_value(self):\r\n \"\"\"NewHorizons parse: Bad sclk value\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/a')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/000310348a')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/0003103485:4900a')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/0003103485:-10000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/0003103485:50000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/0003103485:99999')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/00003103485:49000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk(':00000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/:00000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/-1.00000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/-0003103485:00000')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('1/0150000000:00001')\r\n with self.assertRaises(ValueError):\r\n parse_new_horizons_sclk('3/0149999999:49999')\r\n\r\n def test_parse_good_sclk(self):\r\n \"\"\"NewHorizons parse: Good sclk format\"\"\"\r\n self.assertEqual(parse_new_horizons_sclk('1:'), 1)\r\n self.assertEqual(parse_new_horizons_sclk('1:0'), 1)\r\n self.assertEqual(parse_new_horizons_sclk('1:00'), 1)\r\n self.assertEqual(parse_new_horizons_sclk('1/0003103485:25'), 3103485.5)\r\n self.assertEqual(parse_new_horizons_sclk('1/0003103485:25000'), 3103485.5)\r\n self.assertEqual(parse_new_horizons_sclk('3/1003103485:25000'), 1003103485.5)\r\n self.assertEqual(parse_new_horizons_sclk('1/3103485:25000'), 3103485.5)\r\n self.assertEqual(parse_new_horizons_sclk('1/3103485:25'), 3103485.5)\r\n self.assertEqual(parse_new_horizons_sclk('0003103485:25000'), 3103485.5)\r\n self.assertEqual(parse_new_horizons_sclk('3/9999999999:49999'), 9999999999.99998)\r\n self.assertEqual(parse_new_horizons_sclk('1/0149999999:49999'), 149999999.99998)\r\n 
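# Added example (a sketch beyond the original suite): with no\r\n        # partition prefix, a count exactly at the boundary parses cleanly.\r\n        self.assertEqual(parse_new_horizons_sclk('0150000000:00000'), 150000000)\r\n        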
self.assertEqual(parse_new_horizons_sclk('3/0150000000:00001'), 150000000.00002)\r\n\r\n def test_format_good_sclk(self):\r\n \"\"\"NewHorizons format: Good value\"\"\"\r\n self.assertEqual(format_new_horizons_sclk(0), '0000000000:00000')\r\n self.assertEqual(format_new_horizons_sclk(1234), '0000001234:00000')\r\n self.assertEqual(format_new_horizons_sclk(1234567890), '1234567890:00000')\r\n self.assertEqual(format_new_horizons_sclk(1234.5), '0000001234:25000')\r\n\r\n\r\n################################################################################\r\n################################################################################\r\n# CASSINI\r\n################################################################################\r\n# Conversion routines for the Cassini spacecraft clock.\r\n#\r\n# The clock has two fields separated by a dot. The first field has ten digits.\r\n# The second field has three digits 0-255. The partition is always 1. The\r\n# separator is always a dot.\r\n################################################################################\r\n\r\ndef parse_cassini_sclk(sclk, **kwargs):\r\n \"\"\"Convert a Cassini clock string to a numeric value.\"\"\"\r\n return _parse_two_field_sclk(sclk, 10, '.', 256, 'Cassini')\r\n\r\ndef format_cassini_sclk(value, **kwargs):\r\n \"\"\"Convert a number into a valid Cassini clock string.\"\"\"\r\n return _format_two_field_sclk(value, 10, '.', 256, 'Cassini')\r\n\r\nclass CassiniTest(unittest.TestCase):\r\n def test_parse_extra_slash(self):\r\n \"\"\"Cassini parse: Two slashes\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/2/1294341579.000')\r\n\r\n def test_parse_bad_partition(self):\r\n \"\"\"Cassini parse: Partition number other than 1\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('2/1294341579.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('0/1294341579.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1.0/1294341579.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('-1/1294341579.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('a/1294341579.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('/1294341579.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/1294341579.000.000')\r\n\r\n def test_parse_bad_value(self):\r\n \"\"\"Cassini parse: Bad sclk value\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/a')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/0123456a')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/1294341579.00a')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/1294341579.-1')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/1294341579.256')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/1294341579.2560')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/01294341579.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/-1.000')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_sclk('1/-34640590.000')\r\n\r\n def test_parse_good_sclk(self):\r\n \"\"\"Cassini parse: Good sclk format\"\"\"\r\n self.assertEqual(parse_cassini_sclk('1.'), 1)\r\n self.assertEqual(parse_cassini_sclk('1.0'), 1)\r\n self.assertEqual(parse_cassini_sclk('1.00'), 1)\r\n 
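# Added example (a sketch beyond the original suite): the trailing\r\n        # field counts 1/256ths, so .128 is exactly one half.\r\n        self.assertEqual(parse_cassini_sclk('1/0000001234.128'), 1234.5)\r\n        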
self.assertEqual(parse_cassini_sclk('1/03464059.00'), 3464059)\r\n self.assertEqual(parse_cassini_sclk('1/0003464059.064'), 3464059.25)\r\n self.assertEqual(parse_cassini_sclk('1/3464059.064'), 3464059.25)\r\n self.assertEqual(parse_cassini_sclk('03464059.064'), 3464059.25)\r\n\r\n def test_format_good_sclk(self):\r\n \"\"\"Cassini format: Good value\"\"\"\r\n self.assertEqual(format_cassini_sclk(0), '0000000000.000')\r\n self.assertEqual(format_cassini_sclk(1234), '0000001234.000')\r\n self.assertEqual(format_cassini_sclk(1234567890), '1234567890.000')\r\n self.assertEqual(format_cassini_sclk(1234.250), '0000001234.064')\r\n self.assertEqual(format_cassini_sclk(1234.5), '0000001234.128')\r\n self.assertEqual(format_cassini_sclk(1234.750), '0000001234.192')\r\n\r\n\r\n################################################################################\r\n# Conversion routines for the Cassini orbit number.\r\n#\r\n# Cassini Saturn orbits are numbered 0, A, B, C, 3, 4, 5, ...\r\n#\r\n# In this conversion, the mapping is:\r\n# 0 -> -1\r\n# A -> 0\r\n# B -> 1\r\n# C -> 2\r\n# 3 -> 3\r\n# All higher numbers map to themselves.\r\n################################################################################\r\n\r\nCASSINI_ORBIT_NUMBER = {'A':0, 'B':1, 'C':2}\r\nCASSINI_ORBIT_NAME = {-1:'000', 0:'00A', 1:'00B', 2:'00C'}\r\n\r\ndef parse_cassini_orbit(orbit, **kwargs):\r\n \"\"\"Convert Cassini orbit name to an integer.\"\"\"\r\n try:\r\n intval = int(orbit)\r\n if intval >= 3:\r\n return intval\r\n if intval == 0:\r\n return -1\r\n raise ValueError('Invalid Cassini orbit %s' % orbit)\r\n except ValueError:\r\n pass\r\n\r\n orbit = orbit.upper().strip('0')\r\n try:\r\n return CASSINI_ORBIT_NUMBER[orbit]\r\n except KeyError:\r\n raise ValueError('Invalid Cassini orbit %s' % orbit)\r\n\r\ndef format_cassini_orbit(value, **kwargs):\r\n \"\"\"Convert an internal number for a Cassini orbit to its displayed value.\"\"\"\r\n if value >= 3:\r\n return '%03d' % value\r\n\r\n try:\r\n return CASSINI_ORBIT_NAME[value]\r\n except KeyError:\r\n raise ValueError('Invalid Cassini orbit %s' % str(value))\r\n\r\nclass CassiniOrbitTest(unittest.TestCase):\r\n def test_parse_bad_orbit(self):\r\n \"\"\"CassiniOrbit parse: Bad orbit\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_cassini_orbit('-1')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_orbit('1')\r\n with self.assertRaises(ValueError):\r\n parse_cassini_orbit('2')\r\n\r\n def test_parse_good_orbit(self):\r\n \"\"\"CassiniOrbit parse: Good orbit\"\"\"\r\n self.assertEqual(parse_cassini_orbit('0'), -1)\r\n self.assertEqual(parse_cassini_orbit('A'), 0)\r\n self.assertEqual(parse_cassini_orbit('0A'), 0)\r\n self.assertEqual(parse_cassini_orbit('00A'), 0)\r\n self.assertEqual(parse_cassini_orbit('a'), 0)\r\n self.assertEqual(parse_cassini_orbit('0a'), 0)\r\n self.assertEqual(parse_cassini_orbit('00a'), 0)\r\n self.assertEqual(parse_cassini_orbit('B'), 1)\r\n self.assertEqual(parse_cassini_orbit('b'), 1)\r\n self.assertEqual(parse_cassini_orbit('C'), 2)\r\n self.assertEqual(parse_cassini_orbit('c'), 2)\r\n self.assertEqual(parse_cassini_orbit('3'), 3)\r\n self.assertEqual(parse_cassini_orbit('4'), 4)\r\n\r\n def test_format_bad_orbit(self):\r\n \"\"\"CassiniOrbit format: Bad orbit\"\"\"\r\n with self.assertRaises(ValueError):\r\n format_cassini_orbit(-2)\r\n\r\n def test_format_good_orbit(self):\r\n \"\"\"CassiniOrbit format: Good orbit\"\"\"\r\n self.assertEqual(format_cassini_orbit(-1), '000')\r\n self.assertEqual(format_cassini_orbit(0), 
'00A')\r\n        self.assertEqual(format_cassini_orbit(1), '00B')\r\n        self.assertEqual(format_cassini_orbit(2), '00C')\r\n        self.assertEqual(format_cassini_orbit(3), '003')\r\n        self.assertEqual(format_cassini_orbit(4), '004')\r\n\r\n\r\n################################################################################\r\n# VOYAGER\r\n################################################################################\r\n# Conversion routines for the Voyager spacecraft clock, also known as the\r\n# \"FDS\" or \"Flight Data System\" count.\r\n#\r\n# The clock has three fields:\r\n#   \"hours\": 0-65535\r\n#   \"minutes\": 0-59\r\n#   \"seconds\": 1-800 (not 0-799!)\r\n#\r\n# The separator between fields can be a colon or a dot.\r\n#\r\n# The partition is ignored when formatting. When parsing, an FDS count can\r\n# optionally begin with \"2/\", \"3/\" or \"4/\" because these are the partitions\r\n# for the flybys.\r\n#\r\n# When dealing with Voyager products, sometimes we ignore the first separator,\r\n# so six- or seven-digit numbers are parsed by assuming that the hours and\r\n# minutes fields have been concatenated with no separator.\r\n################################################################################\r\n\r\nVOYAGER_PLANET_NAMES = {5:'Jupiter', 6:'Saturn', 7:'Uranus', 8:'Neptune'}\r\nVOYAGER_PLANET_PARTITIONS = {5:2, 6:2, 7:3, 8:4}\r\n\r\ndef parse_voyager_sclk(sclk, planet=None, **kwargs):\r\n    \"\"\"Convert a Voyager clock string (FDS) to a numeric value.\r\n\r\n    Typically, a partition number is not specified for FDS counts. However, if\r\n    it is, it must be compatible with the planetary flyby. The partition number\r\n    is 2 for Jupiter and Saturn, 3 for Uranus, and 4 for Neptune.\r\n\r\n    If the planet is not specified (planet = None), then any partition value in\r\n    the range 2-4 is allowed and its value is ignored. If the planet is given as\r\n    input (5 for Jupiter, 6 for Saturn, 7 for Uranus, 8 for Neptune), then an\r\n    explicitly stated partition number must be compatible with the associated\r\n    planetary flyby.\r\n    \"\"\"\r\n    assert planet in (None, 5, 6, 7, 8), 'Invalid planet value: ' + str(planet)\r\n\r\n    # Check the partition number before ignoring it\r\n    parts = sclk.split('/')\r\n    if len(parts) > 2:\r\n        raise ValueError('Invalid FDS format, extraneous \"/\": ' + sclk)\r\n\r\n    if len(parts) == 2:\r\n        try:\r\n            partition = int(parts[0])\r\n        except ValueError:\r\n            raise ValueError('Partition number is not an integer: ' + sclk)\r\n\r\n        if planet is None:\r\n            if partition not in VOYAGER_PLANET_PARTITIONS.values():\r\n                raise ValueError('Partition number out of range 2-4: ' + sclk)\r\n        else:\r\n            required_partition = VOYAGER_PLANET_PARTITIONS[planet]\r\n            if partition != required_partition:\r\n                name = VOYAGER_PLANET_NAMES[planet]\r\n                raise ValueError('Partition number for %s flyby ' % name +\r\n                                 'must be %d: ' % required_partition + sclk)\r\n\r\n        sclk = parts[1]\r\n\r\n    # Separator can be '.' or ':'\r\n    if '.' in sclk:\r\n        parts = sclk.split('.')\r\n    elif ':' in sclk:\r\n        parts = sclk.split(':')\r\n    else:\r\n        parts = [sclk]\r\n\r\n    if len(parts) > 3:\r\n        raise ValueError('More than three fields in Voyager clock: ' + sclk)\r\n\r\n    # Make sure fields are integers\r\n    ints = []\r\n    try:\r\n        for part in parts:\r\n            ints.append(int(part))\r\n    except ValueError:\r\n        raise ValueError('Voyager clock fields must be integers: ' + sclk)\r\n\r\n    # If we have just a single six- or seven-digit number, maybe the separator\r\n    # was omitted. 
This is how Voyager image names are handled.\r\n    if len(ints) == 1 and ints[0] >= 100000:\r\n        ints = [ints[0] // 100, ints[0] % 100]\r\n\r\n    # Append fields to make three\r\n    if len(ints) == 1:\r\n        ints.append(0)\r\n    if len(ints) == 2:\r\n        ints.append(1)\r\n\r\n    # Check fields for valid ranges\r\n    if ints[0] > 65535 or ints[0] < 0:\r\n        raise ValueError('Voyager clock \"hours\" out of range 0-65535: ' + sclk)\r\n    if ints[1] > 59 or ints[1] < 0:\r\n        raise ValueError('Voyager clock \"minutes\" out of range 0-59: ' + sclk)\r\n    if ints[2] > 800 or ints[2] < 1:\r\n        raise ValueError('Voyager clock \"seconds\" out of range 1-800: ' + sclk)\r\n\r\n    # Return in units of FDS hours\r\n    return ints[0] + (ints[1] + (ints[2]-1) / 800.) / 60.\r\n\r\ndef format_voyager_sclk(value, sep=':', fields=3, **kwargs):\r\n    \"\"\"Convert a number in units of FDS hours to a valid Voyager clock string.\"\"\"\r\n    assert sep in (':', '.'), 'Separator must be \":\" or \".\": ' + str(sep)\r\n    assert fields in (2,3), 'Fields must be 2 or 3: ' + str(fields)\r\n\r\n    saved_value = value\r\n\r\n    # Extract hours, minutes, seconds\r\n    hours = int(value)\r\n    value -= hours\r\n    value *= 60\r\n\r\n    # Fields == 2\r\n    if fields == 2:\r\n        minutes = int(value + 0.5) # round off minutes\r\n\r\n        # Handle carry\r\n        if minutes >= 60:\r\n            minutes -= 60\r\n            hours += 1\r\n\r\n    # Fields == 3\r\n    else:\r\n        minutes = int(value)\r\n        value -= minutes\r\n        value *= 800\r\n        value += 1\r\n        seconds = int(value + 0.5) # round off seconds\r\n\r\n        # Handle carry\r\n        if seconds > 800:\r\n            seconds -= 800\r\n            minutes += 1\r\n            if minutes >= 60:\r\n                minutes -= 60\r\n                hours += 1\r\n\r\n    # Check range\r\n    if hours > 65535:\r\n        raise ValueError('Voyager clock \"hours\" cannot exceed 65535: ' +\r\n                         str(saved_value))\r\n\r\n    # Format\r\n    if fields == 3:\r\n        sclk = '%05d%s%02d%s%03d' % (hours, sep, minutes, sep, seconds)\r\n    else:\r\n        sclk = '%05d%s%02d' % (hours, sep, minutes)\r\n\r\n    return sclk\r\n\r\nclass VoyagerTest(unittest.TestCase):\r\n    def test_parse_extra_slash(self):\r\n        \"\"\"Voyager parse: Two slashes\"\"\"\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('1/2/08966:30:752')\r\n\r\n    def test_parse_bad_partition(self):\r\n        \"\"\"Voyager parse: Bad partition/planet\"\"\"\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('1/08966:30:752')\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('5/08966:30:752')\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('-1/08966:30:752')\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('6/08966:30:752')\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('a/08966:30:752')\r\n        with self.assertRaises(AssertionError):\r\n            parse_voyager_sclk('1/08966:30:752', planet=4)\r\n        with self.assertRaises(AssertionError):\r\n            parse_voyager_sclk('1/08966:30:752', planet=9)\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('1/08966:30:752', planet=5)\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('3/08966:30:752', planet=5)\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('1/08966:30:752', planet=6)\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('3/08966:30:752', planet=6)\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('1/08966:30:752', planet=7)\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('2/08966:30:752', planet=7)\r\n        with self.assertRaises(ValueError):\r\n            parse_voyager_sclk('1/08966:30:752', planet=8)\r\n        with self.assertRaises(ValueError):\r\n            
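# Added case (a sketch beyond the original suite): Neptune (planet=8)\r\n            # flybys use partition 4 per VOYAGER_PLANET_PARTITIONS, so an\r\n            # explicit partition 2 must be rejected as well.\r\n            parse_voyager_sclk('2/08966:30:752', planet=8)\r\n        with self.assertRaises(ValueError):\r\n            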
parse_voyager_sclk('5/08966:30:752', planet=8)\r\n\r\n def test_parse_bad_sclk(self):\r\n \"\"\"Voyager parse: Bad sclk\"\"\"\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('1:0:1:0')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('0:0:0')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('0:0:801')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('0:-1:0')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('0:61:0')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('-1:0:0')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('65536:0:0')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('0:0:a')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('0:a:0')\r\n with self.assertRaises(ValueError):\r\n parse_voyager_sclk('a:0:0')\r\n\r\n def test_parse_good_partition(self):\r\n \"\"\"Voyager parse: Good partition/planet\"\"\"\r\n self.assertAlmostEqual(parse_voyager_sclk('2/0:0:1'), 0)\r\n self.assertAlmostEqual(parse_voyager_sclk('3/0:0:1'), 0)\r\n self.assertAlmostEqual(parse_voyager_sclk('4/0:0:1'), 0)\r\n self.assertAlmostEqual(parse_voyager_sclk('2/0:0:1', planet=5), 0)\r\n self.assertAlmostEqual(parse_voyager_sclk('2/0:0:1', planet=6), 0)\r\n self.assertAlmostEqual(parse_voyager_sclk('3/0:0:1', planet=7), 0)\r\n self.assertAlmostEqual(parse_voyager_sclk('4/0:0:1', planet=8), 0)\r\n\r\n def test_parse_good_sclk(self):\r\n \"\"\"Voyager parse: Good sclk\"\"\"\r\n self.assertEqual(parse_voyager_sclk('0'), 0)\r\n self.assertEqual(parse_voyager_sclk('0:0'), 0)\r\n self.assertEqual(parse_voyager_sclk('0:0:1'), 0)\r\n self.assertEqual(parse_voyager_sclk('0:0:401'), .5/60.)\r\n self.assertEqual(parse_voyager_sclk('0:1:1'), 1/60.)\r\n self.assertEqual(parse_voyager_sclk('1:0:1'), 1)\r\n self.assertEqual(parse_voyager_sclk('1000:00:001'), 1000)\r\n self.assertEqual(parse_voyager_sclk('1000.00.001'), 1000)\r\n self.assertEqual(parse_voyager_sclk('100000'), 1000)\r\n\r\n def test_format_good_sclk_3(self):\r\n \"\"\"Voyager format: Good sclk 3 fields\"\"\"\r\n self.assertEqual(format_voyager_sclk(0), '00000:00:001')\r\n self.assertEqual(format_voyager_sclk(.5/60), '00000:00:401')\r\n self.assertEqual(format_voyager_sclk(1/60), '00000:01:001')\r\n self.assertEqual(format_voyager_sclk(1), '00001:00:001')\r\n self.assertEqual(format_voyager_sclk(5000), '05000:00:001')\r\n self.assertEqual(format_voyager_sclk(59.97/3600), '00000:01:001')\r\n self.assertEqual(format_voyager_sclk(5000.9999999), '05001:00:001')\r\n self.assertEqual(format_voyager_sclk(5000, sep='.'), '05000.00.001')\r\n\r\n def test_format_good_sclk_2(self):\r\n \"\"\"Voyager format: Good sclk 2 fields\"\"\"\r\n self.assertEqual(format_voyager_sclk(0, fields=2), '00000:00')\r\n self.assertEqual(format_voyager_sclk(.39/60, fields=2), '00000:00')\r\n self.assertEqual(format_voyager_sclk(.5/60, fields=2), '00000:01')\r\n self.assertEqual(format_voyager_sclk(59.6/60, fields=2), '00001:00')\r\n self.assertEqual(format_voyager_sclk(1, fields=2), '00001:00')\r\n self.assertEqual(format_voyager_sclk(5000, fields=2), '05000:00')\r\n self.assertEqual(format_voyager_sclk(5000, fields=2, sep='.'), '05000.00')\r\n\r\n def test_format_bad_sclk(self):\r\n \"\"\"Voyager format: Bad sclk\"\"\"\r\n with self.assertRaises(ValueError):\r\n 
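# Added case (a sketch beyond the original suite): a value whose\r\n            # minutes/seconds carry pushes \"hours\" past 65535 must also fail.\r\n            format_voyager_sclk(65535.9999999)\r\n        with self.assertRaises(ValueError):\r\n            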
format_voyager_sclk(65536)\r\n\r\n\r\n################################################################################\r\n################################################################################\r\n# TIME CONVERSION\r\n################################################################################\r\n\r\ndef parse_time(iso, unit=None, **kwargs):\r\n iso = str(iso)\r\n # For raw numbers, try to use the current unit to figure out what\r\n # to do\r\n try:\r\n et = float(iso)\r\n except:\r\n pass\r\n else:\r\n if not math.isfinite(et):\r\n raise ValueError('Invalid time syntax: '+iso)\r\n if unit == 'et':\r\n return julian.tai_from_tdb(et)\r\n if unit == 'jd':\r\n iso = 'JD' + iso\r\n elif unit == 'jed':\r\n iso = 'JED' + iso\r\n elif unit == 'mjd':\r\n iso = 'MJD' + iso\r\n elif unit == 'mjed':\r\n iso = 'MJED' + iso\r\n try:\r\n (day, sec, time_type) = julian.day_sec_type_from_string(iso)\r\n except:\r\n raise ValueError('Invalid time syntax: '+iso)\r\n if time_type != 'UTC':\r\n raise ValueError('Invalid time syntax: '+iso)\r\n ret = julian.tai_from_day(day) + sec\r\n if ret < MIN_TIME or ret > MAX_TIME:\r\n raise ValueError\r\n return ret\r\n\r\ndef format_time_ymd(tai, **kwargs):\r\n return julian.iso_from_tai(tai, ymd=True, digits=3)\r\n\r\ndef format_time_ydoy(tai, **kwargs):\r\n return julian.iso_from_tai(tai, ymd=False, digits=3)\r\n\r\ndef format_time_jd(tai, **kwargs):\r\n (day, sec) = julian.day_sec_from_tai(tai)\r\n jd = julian.jd_from_day_sec(day, sec)\r\n # We want seconds at a resolution of .001\r\n # There are 86400 seconds in a day, which is roughly 100,000\r\n # So we want 5+3=8 decimal places\r\n return 'JD%.8f' % jd\r\n\r\ndef format_time_jed(tai, **kwargs):\r\n jed = julian.jed_from_tai(tai)\r\n # We want seconds at a resolution of .001\r\n # There are 86400 seconds in a day, which is roughly 100,000\r\n # So we want 5+3=8 decimal places\r\n return 'JED%.8f' % jed\r\n\r\ndef format_time_mjd(tai, **kwargs):\r\n (day, sec) = julian.day_sec_from_tai(tai)\r\n mjd = julian.mjd_from_day_sec(day, sec)\r\n # We want seconds at a resolution of .001\r\n # There are 86400 seconds in a day, which is roughly 100,000\r\n # So we want 5+3=8 decimal places\r\n return 'MJD%.8f' % mjd\r\n\r\ndef format_time_mjed(tai, **kwargs):\r\n mjed = julian.mjed_from_tai(tai)\r\n # We want seconds at a resolution of .001\r\n # There are 86400 seconds in a day, which is roughly 100,000\r\n # So we want 5+3=8 decimal places\r\n return 'MJED%.8f' % mjed\r\n\r\ndef format_time_et(tai, **kwargs):\r\n et = julian.tdb_from_tai(tai)\r\n return '%.3f' % et\r\n\r\nclass TimeTest(unittest.TestCase):\r\n # Note - julian.py has its own test suite, so we don't need to duplicate\r\n # all that here. 
We just do a couple of simple tests to make sure the\r\n # interface is working.\r\n def test_format_ymd(self):\r\n \"\"\"Time format: YMD\"\"\"\r\n self.assertEqual(format_time_ymd(0), '1999-12-31T23:59:28.000')\r\n self.assertEqual(format_time_ymd(600000000), '2019-01-05T10:39:23.000')\r\n\r\n def test_format_ydoy(self):\r\n \"\"\"Time format: YDOY\"\"\"\r\n self.assertEqual(format_time_ydoy(0), '1999-365T23:59:28.000')\r\n self.assertEqual(format_time_ydoy(600000000), '2019-005T10:39:23.000')\r\n\r\n def test_format_jd(self):\r\n \"\"\"Time format: JD\"\"\"\r\n self.assertEqual(format_time_jd(0), 'JD2451544.49962963')\r\n self.assertEqual(format_time_jd(600000000), 'JD2458488.94401620')\r\n\r\n def test_format_jed(self):\r\n \"\"\"Time format: JED\"\"\"\r\n self.assertEqual(format_time_jed(0), 'JED2451544.50037250')\r\n self.assertEqual(format_time_jed(600000000), 'JED2458488.94481695')\r\n\r\n def test_format_mjd(self):\r\n \"\"\"Time format: MJD\"\"\"\r\n self.assertEqual(format_time_mjd(0), 'MJD51543.99962963')\r\n self.assertEqual(format_time_mjd(600000000), 'MJD58488.44401620')\r\n\r\n def test_format_mjed(self):\r\n \"\"\"Time format: MJED\"\"\"\r\n self.assertEqual(format_time_mjed(0), 'MJED51544.00037250')\r\n self.assertEqual(format_time_mjed(600000000), 'MJED58488.44481695')\r\n\r\n def test_format_et(self):\r\n \"\"\"Time format: ET\"\"\"\r\n self.assertEqual(format_time_et(0), '-43167.816')\r\n self.assertEqual(format_time_et(600000000), '599956832.184')\r\n\r\n def test_parse(self):\r\n \"\"\"Time parse\"\"\"\r\n self.assertEqual(parse_time('1999-12-31T23:59:28.000'), 0)\r\n self.assertEqual(parse_time('2019-005T10:39:23.000'), 600000000)\r\n self.assertEqual(julian.jd_from_time(julian.time_from_jd(0)), 0)\r\n self.assertAlmostEqual(julian.jd_from_time(julian.time_from_jd(1000.123)),\r\n 1000.123)\r\n self.assertEqual(julian.mjd_from_time(julian.time_from_mjd(0)), 0)\r\n self.assertAlmostEqual(julian.mjd_from_time(julian.time_from_mjd(1000.123)),\r\n 1000.123)\r\n self.assertAlmostEqual(parse_time('JD2451544.49962963'), 0, places=3)\r\n self.assertAlmostEqual(parse_time('JD2458488.94401620'), 600000000, places=3)\r\n self.assertAlmostEqual(parse_time('2458488.94401620', unit='jd'), 600000000,\r\n places=3)\r\n self.assertAlmostEqual(parse_time('JED2451544.49962963'), -64.18391461631109,\r\n places=3)\r\n self.assertAlmostEqual(parse_time('2451544.49962963', unit='jed'),\r\n -64.18391461631109, places=3)\r\n self.assertAlmostEqual(parse_time('JED2458488.94401620'), 599999930.8156223,\r\n places=3)\r\n self.assertAlmostEqual(parse_time('MJD51543.99962963'), 0, places=3)\r\n self.assertAlmostEqual(parse_time('MJD58488.44401620'), 600000000, places=3)\r\n self.assertAlmostEqual(parse_time('58488.44401620', unit='mjd'), 600000000,\r\n places=3)\r\n self.assertAlmostEqual(parse_time('MJED51543.99962963'), -64.18391461631109,\r\n places=3)\r\n self.assertAlmostEqual(parse_time('51543.99962963', unit='mjed'),\r\n -64.18391461631109, places=3)\r\n self.assertAlmostEqual(parse_time('MJED58488.44401620'), 599999930.8156223,\r\n places=3)\r\n self.assertAlmostEqual(parse_time('50000', unit='et'), 93167.81604149533,\r\n places=3)\r\n with self.assertRaises(ValueError):\r\n parse_time('nan')\r\n with self.assertRaises(ValueError):\r\n parse_time('inf')\r\n with self.assertRaises(ValueError):\r\n parse_time('2000')\r\n with self.assertRaises(ValueError):\r\n parse_time('2000-01-01. 
TAI')\r\n with self.assertRaises(ValueError):\r\n parse_time('JD9999999999')\r\n\r\n def test_idempotent(self):\r\n \"\"\"Time idempotency\"\"\"\r\n self.assertEqual(format_time_ymd(parse_time('2015-05-03T10:12:34.123')),\r\n '2015-05-03T10:12:34.123')\r\n self.assertEqual(format_time_ydoy(parse_time('2015-122T10:12:34.123')),\r\n '2015-122T10:12:34.123')\r\n self.assertEqual(format_time_jd(parse_time('JD2234567.123')),\r\n 'JD2234567.12300000')\r\n self.assertEqual(format_time_jed(parse_time('JED2234567.123')),\r\n 'JED2234567.12300000')\r\n\r\n\r\n################################################################################\r\n################################################################################\r\n# ANGLE CONVERSION\r\n################################################################################\r\n\r\ndef parse_dms_hms(s, conversion_factor=1, **kwargs):\r\n \"\"\"Parse DMS, HMS, or single number, but \"x x x\" defaults to DMS.\"\"\"\r\n return _parse_dms_hms(s, conversion_factor, allow_dms=True, allow_hms=True,\r\n default='dms')\r\n\r\ndef parse_hms_dms(s, conversion_factor=1, **kwargs):\r\n \"\"\"Parse DMS, HMS, or single number, but \"x x x\" defaults to HMS.\"\"\"\r\n return _parse_dms_hms(s, conversion_factor, allow_dms=True, allow_hms=True,\r\n default='hms')\r\n\r\ndef parse_dms(s, conversion_factor=1, **kwargs):\r\n \"\"\"Parse a DMS string or single number.\"\"\"\r\n return _parse_dms_hms(s, conversion_factor, allow_dms=True, allow_hms=False,\r\n default='dms')\r\n\r\ndef parse_hms(s, conversion_factor=1, **kwargs):\r\n \"\"\"Parse an HMS string or single number.\"\"\"\r\n return _parse_dms_hms(s, conversion_factor, allow_dms=False, allow_hms=True,\r\n default='hms')\r\n\r\ndef _parse_dms_hms(s, conversion_factor=1, allow_dms=True, allow_hms=True,\r\n default='dms'):\r\n \"\"\"Parse a DMS or HMS or \"x x x\" or plain number.\"\"\"\r\n # Note: conversion_factor is used here for unit=radians. In that case if\r\n # the user enters something like \"1d\" it needs to be interpreted as degrees\r\n # and converted to radians. 
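For example (an illustrative sketch):\r\n    # with conversion_factor=DEG_RAD, \"1d\" yields 1 * 1 / DEG_RAD ~= 0.0174533,\r\n    # i.e. one degree expressed in radians. 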
But if the user just types a single number, that\r\n # should be interpreted as radians directly.\r\n s = s.lower().strip()\r\n # '' and variants => s\r\n s = s.replace(\"''\", 's').replace('\"', 's').replace(chr(8243), 's')\r\n # ' and variants => m\r\n s = s.replace(\"'\", 'm').replace(chr(8242), 'm')\r\n # deg symbol => d\r\n s = s.replace(chr(176), 'd')\r\n\r\n format_types = []\r\n if allow_dms:\r\n format_types.append(('d', 1))\r\n if allow_hms:\r\n format_types.append(('h', 15))\r\n for format_char, format_factor in format_types:\r\n # We allow exponential notation in the first position\r\n match = re.fullmatch(r'(|[+-]) *(|\\d+(|e(|\\+)\\d+)(|\\.\\d*)'+format_char+\r\n r') *(|\\d+(|\\.\\d*)m) *(|\\d+(|\\.\\d*)s)', s)\r\n if match is None and format_char == default[0]:\r\n # Check for just \"N N N\" if we are looking at the default format\r\n match = re.fullmatch(r'(|[+-]) *(\\d+)()()() +(\\d+)() +(\\d+(|.\\d*))',\r\n s)\r\n if match:\r\n neg = match[1]\r\n degrees_hours = match[2]\r\n minute = match[6]\r\n second = match[8]\r\n force_dh_int = False\r\n force_m_int = False\r\n val = 0\r\n if second:\r\n second = second.strip('s')\r\n # Only \"second\" can have a fractional part if it's provided\r\n force_m_int = True\r\n force_dh_int = True\r\n second = float(second)\r\n if not math.isfinite(second) or second < 0 or second >= 60:\r\n raise ValueError\r\n val += second / 3600\r\n if minute:\r\n minute = minute.strip('m')\r\n # Only \"minute\" can have a fractional part if second is not\r\n # provided\r\n force_dh_int = True\r\n minute = float(minute)\r\n if force_m_int:\r\n if minute != int(minute):\r\n raise ValueError\r\n if not math.isfinite(minute) or minute < 0 or minute >= 60:\r\n raise ValueError\r\n val += minute / 60\r\n if degrees_hours:\r\n degrees_hours = degrees_hours.strip(format_char)\r\n degrees_hours = float(degrees_hours)\r\n if force_dh_int:\r\n if degrees_hours != int(degrees_hours):\r\n raise ValueError\r\n if not math.isfinite(degrees_hours):\r\n raise ValueError\r\n val += degrees_hours\r\n if neg == '-':\r\n val = -val\r\n return val * format_factor / conversion_factor\r\n\r\n # We don't want to allow numbers with spaces in them because that will cause\r\n # potential ambiguity with the \"x x x\" DMS/HMS format.\r\n s = _clean_numeric_field(s, compress_spaces=False)\r\n ret = float(s)\r\n if not math.isfinite(ret):\r\n raise ValueError\r\n\r\n # Note: It is very important that parse_hms_dms is NOT USED for things like\r\n # units == 'radians' because this factor of 15 will be applied\r\n # inappropriately\r\n if default == 'hms':\r\n ret *= 15\r\n\r\n return ret\r\n\r\n\r\ndef format_dms_hms(val, unit_id=None, unit=None, numerical_format=None,\r\n keep_trailing_zeros=False):\r\n \"\"\"Format a number as DMS or HMS or a single number as appropriate.\"\"\"\r\n if unit == 'hours' or unit == 'hms':\r\n # Just do the normal numeric formatting, but divide by 15 first to be\r\n # in units of hours\r\n val /= 15\r\n\r\n # numerical_format is in degrees, regardless of whether val is in degrees\r\n # or hours.\r\n # For DMS, our fractional amount is in seconds, which is 1/3600 degree.\r\n # Round it to 1/1000 to be conservative, which is 3 decimal\r\n # places. Thus we should subtract 3 from the numerical_format size.\r\n # For HMS, our fractional amount is in seconds (but val is in hours), which\r\n # is 1/3600*15=1/240 degree. Round it to 1/100 to be conservative, which is\r\n # 2 decimal places. 
Thus we should subtract 2 from the numerical_format\r\n # size.\r\n # For plain \"hour\", we need to add two digits to account for the factor of\r\n # 15.\r\n # For plain \"radians\", it's 2 digits for the factor of 57.\r\n if unit == 'degrees':\r\n subtract_amt = 0\r\n elif unit == 'dms':\r\n subtract_amt = 3\r\n elif unit == 'hms':\r\n subtract_amt = 2\r\n elif unit == 'hours':\r\n subtract_amt = -2\r\n else:\r\n assert unit == 'radians'\r\n subtract_amt = -2\r\n\r\n new_dec = max(int(numerical_format[1:-1])-subtract_amt, 0)\r\n\r\n if unit in ['degrees', 'radians', 'hours']:\r\n # Plain numeric formatting\r\n new_format = f'%.{new_dec}f'\r\n if abs(val) >= 1e8:\r\n new_format = new_format.replace('f', 'e')\r\n ret = new_format % val\r\n if not keep_trailing_zeros:\r\n ret = _strip_trailing_zeros(ret)\r\n return ret\r\n\r\n # For DMS or HMS, the new format is just for the seconds, so we want to have\r\n # 2 digits with leading zeroes as necessary\r\n if new_dec == 0:\r\n new_format = '02d'\r\n else:\r\n new_format = f'0{new_dec+3}.{new_dec}f'\r\n\r\n val_sec = val * 3600 # Do all the work in seconds for better rounding\r\n neg = val_sec < 0\r\n val_sec = abs(val_sec)\r\n # Round the input number to the given precision\r\n prec = 10**new_dec\r\n val_sec = np.round(val_sec * prec) / prec\r\n dh = int(val_sec // 3600)\r\n val_sec = val_sec-dh*3600\r\n m = min(int(val_sec // 60), 59)\r\n val_sec = val_sec-m*60\r\n\r\n leading_char = 'h'\r\n if unit == 'dms':\r\n leading_char = 'd'\r\n leading_fmt = 'd'\r\n if abs(val) >= 1e8:\r\n leading_fmt = '.0e'\r\n full_format = f'%{leading_fmt}{leading_char} %02dm %0{new_format}'\r\n ret = full_format % (dh, m, val_sec)\r\n if not keep_trailing_zeros:\r\n ret = _strip_trailing_zeros(ret)\r\n ret += 's'\r\n if neg:\r\n ret = '-' + ret\r\n return ret\r\n\r\nclass DMSHMSTest(unittest.TestCase):\r\n def test_parse_hms(self):\r\n \"\"\"HMS parse\"\"\"\r\n self.assertEqual(parse_hms('0h 0m 0s'), 0*15)\r\n self.assertEqual(parse_hms('1h 0m 0s'), 1*15)\r\n self.assertEqual(parse_hms('0h 30m 0s'), 0.5*15)\r\n self.assertEqual(parse_hms('0h 0m 36s'), 0.01*15)\r\n self.assertEqual(parse_hms('23h 30m 36s'), 23.51*15)\r\n self.assertEqual(parse_hms('23h 30\\' 36\"'), 23.51*15)\r\n self.assertEqual(parse_hms('23h 30\\' 36\\'\\''), 23.51*15)\r\n self.assertEqual(parse_hms('+23h 30m 36s'), 23.51*15)\r\n self.assertEqual(parse_hms(' + 23h 30m 36s'), 23.51*15)\r\n self.assertEqual(parse_hms('-23h 30m 36s'), -23.51*15)\r\n self.assertEqual(parse_hms(' - 23h 30m 36s'), -23.51*15)\r\n self.assertEqual(parse_hms('23H 30M 36S'), 23.51*15)\r\n with self.assertRaises(ValueError):\r\n parse_hms('23.1h 30m 36s')\r\n with self.assertRaises(ValueError):\r\n parse_hms('23.1h 30m')\r\n with self.assertRaises(ValueError):\r\n parse_hms('23h 30.123m 36s')\r\n with self.assertRaises(ValueError):\r\n parse_hms('30.123m 36s')\r\n with self.assertRaises(ValueError):\r\n parse_hms('23d 30m 36s')\r\n self.assertEqual(parse_hms('23.h 30.m 36.36s'), 23.5101*15)\r\n self.assertEqual(parse_hms('23.h 30.m 36.36s'), 23.5101*15)\r\n self.assertEqual(parse_hms('23.h30.m36.36s'), 23.5101*15)\r\n self.assertEqual(parse_hms('23.000h 30.000m 36.36000s'), 23.5101*15)\r\n self.assertEqual(parse_hms('0h 0m'), 0*15)\r\n self.assertEqual(parse_hms('0h 0.30m'), 0.005*15)\r\n self.assertEqual(parse_hms('0h 0s'), 0*15)\r\n self.assertEqual(parse_hms('0h 36.36s'), 0.0101*15)\r\n self.assertEqual(parse_hms('0m 0s'), 0*15)\r\n self.assertEqual(parse_hms('10h'), 10*15)\r\n self.assertEqual(parse_hms('10.123h'), 
10.123*15)\r\n self.assertEqual(parse_hms('0m'), 0*15)\r\n self.assertEqual(parse_hms('30m'), 0.5*15)\r\n self.assertEqual(parse_hms('30.30m'), 0.505*15)\r\n self.assertEqual(parse_hms('36s'), 0.01*15)\r\n self.assertEqual(parse_hms('36.36s'), 0.0101*15)\r\n with self.assertRaises(ValueError):\r\n parse_hms('60m')\r\n with self.assertRaises(ValueError):\r\n parse_hms('1234m')\r\n with self.assertRaises(ValueError):\r\n parse_hms('60s')\r\n with self.assertRaises(ValueError):\r\n parse_hms('1234s')\r\n self.assertEqual(parse_hms('0 0 0'), 0*15)\r\n self.assertEqual(parse_hms('1 30 36.36'), 1.5101*15)\r\n with self.assertRaises(ValueError):\r\n parse_hms('12 23')\r\n self.assertEqual(parse_hms('123.456'), 123.456*15)\r\n self.assertEqual(parse_hms('1000000000h 0m 0s'), 1000000000*15)\r\n self.assertEqual(parse_hms('1e+9h 0m 0s'), 1000000000*15)\r\n self.assertEqual(parse_hms('1e+0009h 0m 0s'), 1000000000*15)\r\n self.assertEqual(parse_hms('123.456', conversion_factor=2),\r\n 123.456*15)\r\n self.assertEqual(parse_hms('123.456h', conversion_factor=2),\r\n 123.456*15/2)\r\n with self.assertRaises(ValueError):\r\n parse_hms('1e400d')\r\n with self.assertRaises(ValueError):\r\n parse_hms('1e400')\r\n\r\n def test_parse_dms(self):\r\n \"\"\"DMS parse\"\"\"\r\n self.assertEqual(parse_dms('0d 0m 0s'), 0)\r\n self.assertEqual(parse_dms('1d 0m 0s'), 1)\r\n self.assertEqual(parse_dms('0d 30m 0s'), 0.5)\r\n self.assertEqual(parse_dms('0d 0m 36s'), 0.01)\r\n self.assertEqual(parse_dms('23d 30m 36s'), 23.51)\r\n self.assertEqual(parse_dms('23d 30\\' 36\"'), 23.51)\r\n self.assertEqual(parse_dms('23d 30\\' 36\\'\\''), 23.51)\r\n self.assertEqual(parse_dms('+23d 30m 36s'), 23.51)\r\n self.assertEqual(parse_dms(' + 23d 30m 36s'), 23.51)\r\n self.assertEqual(parse_dms('-23d 30m 36s'), -23.51)\r\n self.assertEqual(parse_dms(' - 23d 30m 36s'), -23.51)\r\n self.assertEqual(parse_dms('23D 30M 36S'), 23.51)\r\n with self.assertRaises(ValueError):\r\n parse_dms('23.1d 30m 36s')\r\n with self.assertRaises(ValueError):\r\n parse_dms('23.1d 30m')\r\n with self.assertRaises(ValueError):\r\n parse_dms('23d 30.123m 36s')\r\n with self.assertRaises(ValueError):\r\n parse_dms('30.123m 36s')\r\n with self.assertRaises(ValueError):\r\n parse_dms('23h 30m 36s')\r\n self.assertEqual(parse_dms('23.d 30.m 36.36s'), 23.5101)\r\n self.assertEqual(parse_dms('23.d 30.m 36.36s'), 23.5101)\r\n self.assertEqual(parse_dms('23.d30.m36.36s'), 23.5101)\r\n self.assertEqual(parse_dms('23.000d 30.000m 36.36000s'), 23.5101)\r\n self.assertEqual(parse_dms('0d 0m'), 0)\r\n self.assertEqual(parse_dms('0d 0.30m'), 0.005)\r\n self.assertEqual(parse_dms('0d 0s'), 0)\r\n self.assertEqual(parse_dms('0d 36.36s'), 0.0101)\r\n self.assertEqual(parse_dms('0m 0s'), 0)\r\n self.assertEqual(parse_dms('10d'), 10)\r\n self.assertEqual(parse_dms('10.123d'), 10.123)\r\n self.assertEqual(parse_dms('0m'), 0)\r\n self.assertEqual(parse_dms('30m'), 0.5)\r\n self.assertEqual(parse_dms('30.30m'), 0.505)\r\n self.assertEqual(parse_dms('36s'), 0.01)\r\n self.assertEqual(parse_dms('36.36s'), 0.0101)\r\n with self.assertRaises(ValueError):\r\n parse_dms('60m')\r\n with self.assertRaises(ValueError):\r\n parse_dms('1234m')\r\n with self.assertRaises(ValueError):\r\n parse_dms('60s')\r\n with self.assertRaises(ValueError):\r\n parse_dms('1234s')\r\n self.assertEqual(parse_dms('0 0 0'), 0)\r\n self.assertEqual(parse_dms('1 30 36.36'), 1.5101)\r\n with self.assertRaises(ValueError):\r\n parse_dms('12 23')\r\n self.assertEqual(parse_dms('123.456'), 123.456)\r\n 
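# Added example (a sketch beyond the original suite): whole degrees\r\n        # combined with half-degree minutes.\r\n        self.assertEqual(parse_dms('12d 30m'), 12.5)\r\n        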
self.assertEqual(parse_dms('1000000000d 0m 0s'), 1000000000)\r\n        self.assertEqual(parse_dms('1e+9d 0m 0s'), 1000000000)\r\n        self.assertEqual(parse_dms('1e+0009d 0m 0s'), 1000000000)\r\n        self.assertEqual(parse_dms('1E+0009d 0m 0s'), 1000000000)\r\n        self.assertEqual(parse_dms('123.456', conversion_factor=2), 123.456)\r\n        self.assertEqual(parse_dms('123.456d', conversion_factor=2), 123.456/2)\r\n        with self.assertRaises(ValueError):\r\n            parse_dms('1e400d')\r\n        with self.assertRaises(ValueError):\r\n            parse_dms('1e400')\r\n\r\n    def test_parse_dms_hms(self):\r\n        \"\"\"DMS_HMS parse\"\"\"\r\n        self.assertEqual(parse_dms_hms('1d 30m 36s'), 1.51)\r\n        self.assertEqual(parse_dms_hms('1h 30m 36s'), 1.51*15)\r\n        self.assertEqual(parse_dms_hms('1 30 36'), 1.51)\r\n        self.assertEqual(parse_dms_hms('1.5'), 1.5)\r\n        self.assertEqual(parse_dms_hms('1.5', conversion_factor=2), 1.5)\r\n        self.assertEqual(parse_dms_hms('1 30 36', conversion_factor=2), 1.51/2)\r\n        self.assertEqual(parse_dms_hms('1.5d', conversion_factor=2), 1.5/2)\r\n        self.assertEqual(parse_dms_hms('1.5h', conversion_factor=2), 1.5*15/2)\r\n        with self.assertRaises(ValueError):\r\n            parse_dms_hms('1e400d')\r\n        with self.assertRaises(ValueError):\r\n            parse_dms_hms('1e400h')\r\n        with self.assertRaises(ValueError):\r\n            parse_dms_hms('1e400')\r\n\r\n    def test_parse_hms_dms(self):\r\n        \"\"\"HMS_DMS parse\"\"\"\r\n        self.assertEqual(parse_hms_dms('1d 30m 36s'), 1.51)\r\n        self.assertEqual(parse_hms_dms('1h 30m 36s'), 1.51*15)\r\n        self.assertEqual(parse_hms_dms('1.5'), 1.5*15)\r\n        self.assertEqual(parse_hms_dms('1 30 36'), 1.51*15)\r\n        self.assertEqual(parse_hms_dms('1 30 36', conversion_factor=2), 1.51*15/2)\r\n        self.assertEqual(parse_hms_dms('1.5d', conversion_factor=2), 1.5/2)\r\n        self.assertEqual(parse_hms_dms('1.5h', conversion_factor=2), 1.5*15/2)\r\n        with self.assertRaises(ValueError):\r\n            parse_hms_dms('1e400d')\r\n        with self.assertRaises(ValueError):\r\n            parse_hms_dms('1e400h')\r\n        with self.assertRaises(ValueError):\r\n            parse_hms_dms('1e400')\r\n\r\n    def test_format_dms_hms(self):\r\n        \"\"\"DMS_HMS format\"\"\"\r\n        self.assertEqual(format_dms_hms(0, None, 'degrees', '.3f', True), '0.000')\r\n        self.assertEqual(format_dms_hms(0, None, 'degrees', '.3f', False), '0')\r\n        self.assertEqual(format_dms_hms(123.4, None, 'degrees', '.3f', True), '123.400')\r\n        self.assertEqual(format_dms_hms(123.4, None, 'degrees', '.3f', False), '123.4')\r\n        self.assertEqual(format_dms_hms(123.456789, None, 'degrees', '.3f', True),\r\n                         '123.457')\r\n        self.assertEqual(format_dms_hms(123.456789, None, 'degrees', '.3f', False),\r\n                         '123.457')\r\n        self.assertEqual(format_dms_hms(-123.456789, None, 'degrees', '.3f', False),\r\n                         '-123.457')\r\n        self.assertEqual(format_dms_hms(1e7, None, 'degrees', '.3f', False), '10000000')\r\n        self.assertEqual(format_dms_hms(1e8, None, 'degrees', '.3f', False), '1e+08')\r\n        self.assertEqual(format_dms_hms(1.01e8, None, 'degrees', '.3f', False),\r\n                         '1.01e+08')\r\n\r\n        self.assertEqual(format_dms_hms(0, None, 'hours', '.3f', True), '0.00000')\r\n        self.assertEqual(format_dms_hms(0, None, 'hours', '.3f', False), '0')\r\n        self.assertEqual(format_dms_hms(121.86, None, 'hours', '.3f', True), '8.12400')\r\n        self.assertEqual(format_dms_hms(121.86, None, 'hours', '.3f', False), '8.124')\r\n        self.assertEqual(format_dms_hms(123.456789, None, 'hours', '.3f', True),\r\n                         '8.23045')\r\n        self.assertEqual(format_dms_hms(123.456789, None, 'hours', '.3f', False),\r\n                         '8.23045')\r\n        self.assertEqual(format_dms_hms(-123.456789, None, 'hours', '.3f', False),\r\n                         '-8.23045')\r\n        
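# Added example (a sketch beyond the original suite): 30 degrees is\r\n        # exactly 2 hours, and the 'hours' unit keeps 3+2 = 5 decimal places.\r\n        self.assertEqual(format_dms_hms(30, None, 'hours', '.3f', True), '2.00000')\r\n        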
self.assertEqual(format_dms_hms(15e8, None, 'hours', '.3f', False), '1e+08')\r\n self.assertEqual(format_dms_hms(15.15e8, None, 'hours', '.3f', False), '1.01e+08')\r\n\r\n self.assertEqual(format_dms_hms(0, None, 'radians', '.3f', True), '0.00000')\r\n self.assertEqual(format_dms_hms(0, None, 'radians', '.3f', False), '0')\r\n self.assertEqual(format_dms_hms(1e7, None, 'radians', '.3f', False), '10000000')\r\n self.assertEqual(format_dms_hms(1e8, None, 'radians', '.3f', False), '1e+08')\r\n self.assertEqual(format_dms_hms(1.01e8, None, 'radians', '.3f', False),\r\n '1.01e+08')\r\n\r\n self.assertEqual(format_dms_hms(0, None, 'dms', '.6f', False), '0d 00m 00s')\r\n self.assertEqual(format_dms_hms(0, None, 'dms', '.6f', True), '0d 00m 00.000s')\r\n self.assertEqual(format_dms_hms(0.0001, None, 'dms', '.6f', False),\r\n '0d 00m 00.36s')\r\n self.assertEqual(format_dms_hms(0.0001, None, 'dms', '.6f', True),\r\n '0d 00m 00.360s')\r\n self.assertEqual(format_dms_hms(-0.0001, None, 'dms', '.6f', False),\r\n '-0d 00m 00.36s')\r\n self.assertEqual(format_dms_hms(-0.0001, None, 'dms', '.6f', True),\r\n '-0d 00m 00.360s')\r\n self.assertEqual(format_dms_hms(700, None, 'dms', '.6f', False),\r\n '700d 00m 00s')\r\n self.assertEqual(format_dms_hms(699.99999987, None, 'dms', '.6f', False),\r\n '700d 00m 00s')\r\n self.assertEqual(format_dms_hms(699.99999986, None, 'dms', '.6f', False),\r\n '699d 59m 59.999s')\r\n self.assertEqual(format_dms_hms(-699.99999986, None, 'dms', '.6f', False),\r\n '-699d 59m 59.999s')\r\n self.assertEqual(format_dms_hms(1e7, None, 'dms', '.3f', False),\r\n '10000000d 00m 00s')\r\n self.assertEqual(format_dms_hms(1e8, None, 'dms', '.3f', False),\r\n '1e+08d 00m 00s')\r\n\r\n self.assertEqual(format_dms_hms(0, None, 'hms', '.6f', False), '0h 00m 00s')\r\n self.assertEqual(format_dms_hms(0, None, 'hms', '.6f', True), '0h 00m 00.0000s')\r\n self.assertEqual(format_dms_hms(0.0001*15, None, 'hms', '.6f', False),\r\n '0h 00m 00.36s')\r\n self.assertEqual(format_dms_hms(0.0001*15, None, 'hms', '.6f', True),\r\n '0h 00m 00.3600s')\r\n self.assertEqual(format_dms_hms(-0.0001*15, None, 'hms', '.6f', False),\r\n '-0h 00m 00.36s')\r\n self.assertEqual(format_dms_hms(-0.0001*15, None, 'hms', '.6f', True),\r\n '-0h 00m 00.3600s')\r\n self.assertEqual(format_dms_hms(700*15, None, 'hms', '.6f', False),\r\n '700h 00m 00s')\r\n self.assertEqual(format_dms_hms(699.99999987*15, None, 'hms', '.5f', False),\r\n '700h 00m 00s')\r\n self.assertEqual(format_dms_hms(699.99999986*15, None, 'hms', '.5f', False),\r\n '699h 59m 59.999s')\r\n self.assertEqual(format_dms_hms(-699.99999986*15, None, 'hms', '.5f', False),\r\n '-699h 59m 59.999s')\r\n self.assertEqual(format_dms_hms(1e7*15, None, 'hms', '.3f', False),\r\n '10000000h 00m 00s')\r\n self.assertEqual(format_dms_hms(1e8*15, None, 'hms', '.3f', False),\r\n '1e+08h 00m 00s')\r\n\r\n\r\n################################################################################\r\n################################################################################\r\n# UNITS AND FORMATS\r\n################################################################################\r\n\r\n# This dictionary is keyed by \"unit_id\", which is the name of the group of\r\n# formats/units. Within a given \"unit_id\" is a set of \"details\" that include\r\n# 1) The display name for the unit/format in the UI. 
This might be in a\r\n# dropdown box on the Search tab or in the Table View header or Detail Tab.\r\n# 2) The numerical conversion factor to apply to the number in the database.\r\n# If the value in the database is already the correct value, or is just being\r\n# sent to a formatting function, then the conversion factor is 1.\r\n# 3) The function to call, if any, to parse a string into a value with this\r\n# format/unit. Often the parse routine is the same for multiple units/formats\r\n# because we want the input to be free-form.\r\n# 4) The function to call, if any, to format a value with this format/unit as a\r\n# string.\r\n# In addition:\r\n# 'display_search': True or False, indicating whether the unit/format names\r\n# should be displayed on the Search Tab in a dropdown box.\r\n# This will generally be False when we're just doing a\r\n# format that has no alternative selections, like an SCLK.\r\n# 'display_result': True or False, indicating whether the unit/format names\r\n# should be displayed in any results (Table View, Detail\r\n# Tab). This will generally be False when it's really\r\n# obvious what the displayed format is, like YMDhms.\r\n# 'default': The name of the default unit/format.\r\n\r\nUNIT_FORMAT_DB = {\r\n 'range_cassini_sclk': {\r\n 'display_search': False,\r\n 'display_result': False,\r\n 'default': 'range_cassini_sclk',\r\n 'conversions': {\r\n 'range_cassini_sclk': (None, 1,\r\n parse_cassini_sclk, format_cassini_sclk, [])\r\n }\r\n },\r\n 'range_galileo_sclk': {\r\n 'display_search': False,\r\n 'display_result': False,\r\n 'default': 'range_galileo_sclk',\r\n 'conversions': {\r\n 'range_galileo_sclk': (None, 1,\r\n parse_galileo_sclk, format_galileo_sclk, [])\r\n }\r\n },\r\n 'range_new_horizons_sclk': {\r\n 'display_search': False,\r\n 'display_result': False,\r\n 'default': 'range_new_horizons_sclk',\r\n 'conversions': {\r\n 'range_new_horizons_sclk': (None, 1,\r\n parse_new_horizons_sclk,\r\n format_new_horizons_sclk, [])\r\n }\r\n },\r\n 'range_voyager_sclk': {\r\n 'display_search': False,\r\n 'display_result': False,\r\n 'default': 'range_voyager_sclk',\r\n 'conversions': {\r\n 'range_voyager_sclk': (None, 1,\r\n parse_voyager_sclk, format_voyager_sclk, [])\r\n }\r\n },\r\n 'range_cassini_rev_no': {\r\n 'display_search': False,\r\n 'display_result': False,\r\n 'default': 'range_cassini_rev_no',\r\n 'conversions': {\r\n 'range_cassini_rev_no': (None, 1,\r\n parse_cassini_orbit, format_cassini_orbit,\r\n [])\r\n }\r\n },\r\n 'datetime': {\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'ymdhms',\r\n 'conversions': {\r\n 'ymdhms': ('YMDhms', 1, parse_time, format_time_ymd, []),\r\n 'ydhms': ('YDhms', 1, parse_time, format_time_ydoy, []),\r\n 'jd': ('JD', 1, parse_time, format_time_jd, []),\r\n 'jed': ('JED', 1, parse_time, format_time_jed, []),\r\n 'mjd': ('MJD', 1, parse_time, format_time_mjd, []),\r\n 'mjed': ('MJED', 1, parse_time, format_time_mjed, []),\r\n 'et': ('SPICE ET', 1, parse_time, format_time_et, [])\r\n }\r\n },\r\n 'duration': { # Difference between two datetimes\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'seconds',\r\n 'conversions': {\r\n 'seconds': ('secs', 1, None, None,\r\n ['s', 'sec', 'secs', 'second', 'seconds']),\r\n 'microseconds': ('usecs', 0.000001, None, None,\r\n ['us', 'usec', 'usecs', 'microsecond',\r\n 'microseconds']),\r\n 'milliseconds': ('msecs', 0.001, None, None,\r\n ['ms', 'msec', 'msecs', 'millisecond',\r\n 'milliseconds']),\r\n 'minutes': ('minutes', 60., None, None,\r\n 
['min', 'mins', 'minute', 'minutes']),\r\n 'hours': ('hours', 60.*60., None, None,\r\n ['h', 'hr', 'hrs', 'hour', 'hours']),\r\n 'days': ('days', 60.*60.*24., None, None,\r\n ['d', 'day', 'days']),\r\n }\r\n },\r\n 'generic_angle': { # Generic degrees, like lighting geometry\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'degrees',\r\n 'conversions': {\r\n 'degrees': ('degrees', 1., None, None,\r\n ['d', 'deg', 'degs', 'degree', 'degrees']),\r\n 'radians': ('radians', DEG_RAD, None, None,\r\n ['r', 'rad', 'rads', 'radians']),\r\n }\r\n },\r\n 'latitude': { # Latitude on a body; includes declination\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'degrees',\r\n 'conversions': {\r\n 'degrees': ('degrees', 1., parse_dms, format_dms_hms,\r\n []),\r\n 'dms': ('DMS', 1., parse_dms, format_dms_hms,\r\n []),\r\n 'radians': ('radians', DEG_RAD, parse_dms, format_dms_hms,\r\n []),\r\n }\r\n },\r\n 'longitude': { # Longitude on a body or ring\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'degrees',\r\n 'conversions': {\r\n 'degrees': ('degrees', 1., parse_dms, format_dms_hms,\r\n []),\r\n 'dms': ('DMS', 1., parse_dms, format_dms_hms,\r\n []),\r\n 'radians': ('radians', DEG_RAD, parse_dms, format_dms_hms,\r\n []),\r\n }\r\n },\r\n # We do something unusual for hour_angle, since we need people to be\r\n # able to type in a number in either \"dms\" or \"hms\" format at any time and\r\n # have it do the right thing. As a result, we can't use the *15 unit\r\n # conversion factor, and we need a special format routine for hours as\r\n # a floating point number (which divides by 15) rather than using the normal\r\n # number conversion.\r\n 'hour_angle': { # Hour angle; includes right ascension\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'degrees',\r\n 'conversions': {\r\n 'degrees': ('degrees', 1., parse_dms_hms, format_dms_hms,\r\n []),\r\n 'dms': ('DMS', 1., parse_dms_hms, format_dms_hms,\r\n []),\r\n 'hours': ('hours', 1., parse_hms_dms, format_dms_hms,\r\n []),\r\n 'hms': ('HMS', 1., parse_hms_dms, format_dms_hms,\r\n []),\r\n 'radians': ('radians', DEG_RAD,\r\n parse_dms_hms, format_dms_hms,\r\n []),\r\n }\r\n },\r\n 'distance_ring': {\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'km',\r\n 'conversions': {\r\n 'km': ('km', 1, None, None,\r\n ['km', 'kms', 'kilometer', 'kilometers']),\r\n 'm': ('m', 1e-3, None, None,\r\n ['m', 'ms', 'meter', 'meters']),\r\n 'jupiterradii': ('Rj (71492)', 71492., None, None,\r\n ['rj(71492)', 'rj']),\r\n 'saturnradii': ('Rs (60330)', 60330., None, None,\r\n ['rs(60330)', 'rs']),\r\n 'neptuneradii': ('Rn (25225)', 25225., None, None,\r\n ['rn(25225)', 'rn']),\r\n 'uranusradii': ('Ru (25559)', 25559., None, None,\r\n ['ru(25559)', 'ru']),\r\n }\r\n },\r\n 'distance': {\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'km',\r\n 'conversions': {\r\n 'km': ('km', 1, None, None,\r\n ['km', 'kms', 'kilometer', 'kilometers']),\r\n 'm': ('m', 1e-3, None, None,\r\n ['m', 'ms', 'meter', 'meters']),\r\n 'au': ('AU', 149597870.700, None, None,\r\n ['au'])\r\n }\r\n },\r\n 'distance_resolution': {\r\n 'display_search': True,\r\n 'display_result': True,\r\n 'default': 'km_pixel',\r\n 'conversions': {\r\n 'km_pixel': ('km/pixel', 1, None, None,\r\n ['km/p', 'km/pix', 'km/pixel', 'kmperpix',\r\n 'kmperpixel',\r\n 'kms/p', 'kms/pix', 'kms/pixel', 'kmsperpix',\r\n 'kmsperpixel',\r\n 'kilometer/p', 'kilometer/pix', 'kilometer/pixel',\r\n 
'kilometerperpix', 'kilometerperpixel',\r\n                          'kilometers/p', 'kilometers/pix',\r\n                          'kilometers/pixel', 'kilometersperpix',\r\n                          'kilometersperpixel']),\r\n            'm_pixel': ('m/pixel', 1e-3, None, None,\r\n                        ['m/p', 'm/pix', 'm/pixel', 'mperpix',\r\n                         'mperpixel',\r\n                         'ms/p', 'ms/pix', 'ms/pixel', 'msperpix',\r\n                         'msperpixel',\r\n                         'meter/p', 'meter/pix', 'meter/pixel',\r\n                         'meterperpix', 'meterperpixel',\r\n                         'meters/p', 'meters/pix',\r\n                         'meters/pixel', 'metersperpix',\r\n                         'metersperpixel']),\r\n        }\r\n    },\r\n    'wavelength': {\r\n        'display_search': True,\r\n        'display_result': True,\r\n        'default': 'microns',\r\n        'conversions': {\r\n            'microns': ('microns', 1., None, None,\r\n                        ['um', 'umeter', 'umeters',\r\n                         'micron', 'microns',\r\n                         'micrometer', 'micrometers']),\r\n            'angstroms': ('angstroms', 1e-4, None, None,\r\n                          ['ang', 'angstrom', 'angstroms']),\r\n            'nm': ('nm', 1e-3, None, None,\r\n                   ['nm', 'nanometer', 'nanometers']),\r\n            'cm': ('cm', 1e4, None, None,\r\n                   ['cm', 'centimeter', 'centimeters']),\r\n        }\r\n    },\r\n    'wavelength_resolution': {\r\n        'display_search': True,\r\n        'display_result': True,\r\n        'default': 'microns_pixel',\r\n        'conversions': {\r\n            'microns_pixel': ('microns/pixel', 1, None, None,\r\n                              ['um/p', 'um/pix', 'um/pixel', 'umperpix',\r\n                               'umperpixel',\r\n                               'micron/p', 'micron/pix', 'micron/pixel',\r\n                               'micronperpix', 'micronperpixel',\r\n                               'microns/p', 'microns/pix', 'microns/pixel',\r\n                               'micronsperpix', 'micronsperpixel',\r\n                               'micrometer/p', 'micrometer/pix',\r\n                               'micrometer/pixel',\r\n                               'micrometerperpix', 'micrometerperpixel',\r\n                               'micrometers/p', 'micrometers/pix',\r\n                               'micrometers/pixel', 'micrometersperpix',\r\n                               'micrometersperpixel']),\r\n            'angstroms_pixel': ('angstroms/pixel', 1e-4, None, None,\r\n                                ['ang/p', 'ang/pix', 'ang/pixel',\r\n                                 'angperpix', 'angperpixel',\r\n                                 'angstrom/p', 'angstrom/pix',\r\n                                 'angstrom/pixel',\r\n                                 'angstromperpix', 'angstromperpixel',\r\n                                 'angstroms/p', 'angstroms/pix',\r\n                                 'angstroms/pixel',\r\n                                 'angstromsperpix', 'angstromsperpixel']),\r\n            'nm_pixel': ('nm/pixel', 1e-3, None, None,\r\n                         ['nm/p', 'nm/pix', 'nm/pixel', 'nmperpix',\r\n                          'nmperpixel',\r\n                          'nanometer/p', 'nanometer/pix',\r\n                          'nanometer/pixel',\r\n                          'nanometerperpix', 'nanometerperpixel',\r\n                          'nanometers/p', 'nanometers/pix',\r\n                          'nanometers/pixel', 'nanometersperpix',\r\n                          'nanometersperpixel']),\r\n            'cm_pixel': ('cm/pixel', 1e4, None, None,\r\n                         ['cm/p', 'cm/pix', 'cm/pixel', 'cmperpix',\r\n                          'cmperpixel',\r\n                          'centimeter/p', 'centimeter/pix',\r\n                          'centimeter/pixel',\r\n                          'centimeterperpix', 'centimeterperpixel',\r\n                          'centimeters/p', 'centimeters/pix',\r\n                          'centimeters/pixel', 'centimetersperpix',\r\n                          'centimetersperpixel']),\r\n        }\r\n    },\r\n    'wavenumber': {\r\n        'display_search': True,\r\n        'display_result': True,\r\n        'default': '1_cm',\r\n        'conversions': {\r\n            '1_cm': ('cm^-1', 1., None, None,\r\n                     ['1/cm', 'cm^-1', 'cm**-1']),\r\n            '1_m': ('m^-1', 1e-2, None, None,\r\n                    ['1/m', 'm^-1', 'm**-1']),\r\n        }\r\n    },\r\n    'wavenumber_resolution': {\r\n        'display_search': True,\r\n        'display_result': True,\r\n        'default': '1_cm_pixel',\r\n        'conversions': {\r\n            '1_cm_pixel': ('cm^-1/pixel', 1., None, None,\r\n                           ['1/cm/p', '1/cm/pix', '1/cm/pixel', '1/cmperpix',\r\n                            '1/cmperpixel',\r\n                            '1/centimeter/p', '1/centimeter/pix',\r\n                            '1/centimeter/pixel',\r\n                            '1/centimeterperpix', '1/centimeterperpixel',\r\n                            'cm^-1/p', 'cm^-1/pix', 'cm^-1/pixel',\r\n                            'cm^-1perpix', 'cm^-1perpixel',\r\n                            'cm**-1/p', 'cm**-1/pix', 'cm**-1/pixel',\r\n                            'cm**-1perpix', 'cm**-1perpixel']),\r\n            '1_m_pixel': ('m^-1/pixel', 1e-2, None, None,\r\n                          ['1/m/p', 
'1/m/pix', '1/m/pixel', '1/mperpix',\r\n                           '1/mperpixel',\r\n                           '1/meter/p', '1/meter/pix',\r\n                           '1/meter/pixel',\r\n                           '1/meterperpix', '1/meterperpixel',\r\n                           'm^-1/p', 'm^-1/pix', 'm^-1/pixel',\r\n                           'm^-1perpix', 'm^-1perpixel',\r\n                           'm**-1/p', 'm**-1/pix', 'm**-1/pixel',\r\n                           'm**-1perpix', 'm**-1perpixel']),\r\n        }\r\n    },\r\n}\r\n\r\n### NUMERICAL CONVERSION\r\n### (These routines *numerically* convert to/from the value stored in the\r\n### database with no formatting)\r\n\r\n# In all of the following functions, unit_id must be in the proper case. This is\r\n# a safe assumption because the unit_id comes from the ParamInfo structure.\r\n# On the other hand, unit can be in any case, since it's potentially supplied\r\n# by the user, and we force it to lower case.\r\n\r\ndef convert_to_default_unit(val, unit_id, unit):\r\n    \"\"\"Convert a value from a specific unit to the default unit for unit_id.\"\"\"\r\n    if unit_id is None and unit is not None:\r\n        raise KeyError\r\n    if val is None or (unit_id is None and unit is None):\r\n        return val\r\n    unit = unit.lower()\r\n    default_unit = UNIT_FORMAT_DB[unit_id]['default']\r\n    if default_unit == unit:\r\n        return val\r\n    ret = val * UNIT_FORMAT_DB[unit_id]['conversions'][unit][1]\r\n    if not math.isfinite(ret):\r\n        raise ValueError\r\n    return ret\r\n\r\ndef convert_from_default_unit(val, unit_id, unit):\r\n    \"\"\"Convert a value from the default unit to a specific unit for unit_id.\"\"\"\r\n    if unit_id is None and unit is not None:\r\n        raise KeyError\r\n    if val is None or (unit_id is None and unit is None):\r\n        return val\r\n    unit = unit.lower()\r\n    default_unit = UNIT_FORMAT_DB[unit_id]['default']\r\n    if default_unit == unit:\r\n        return val\r\n    ret = val / UNIT_FORMAT_DB[unit_id]['conversions'][unit][1]\r\n    if not math.isfinite(ret):\r\n        raise ValueError\r\n    return ret\r\n\r\n### GET INFORMATION ABOUT UNITS\r\n\r\ndef get_valid_units(unit_id):\r\n    \"\"\"Get the list of valid units for a unit_id.\r\n\r\n    If unit_id is None or unknown, we return None.\r\n    \"\"\"\r\n    unit_info = UNIT_FORMAT_DB.get(unit_id, None)\r\n    valid_units = None\r\n    if unit_info is not None:\r\n        # This will create a list with the same order as written in the dict\r\n        # initialization above.\r\n        valid_units = list(unit_info['conversions'].keys())\r\n    return valid_units\r\n\r\ndef get_unit_display_names(unit_id):\r\n    \"\"\"Get a dictionary with valid units as keys and display names as values.\r\n\r\n    If unit_id is None or unknown, we return None.\"\"\"\r\n    unit_info = UNIT_FORMAT_DB.get(unit_id, None)\r\n    display_names = None\r\n    if unit_info is not None:\r\n        display_names = {}\r\n        valid_units = unit_info['conversions']\r\n        for unit in valid_units:\r\n            display_names[unit] = valid_units[unit][0]\r\n    return display_names\r\n\r\ndef get_unit_display_name(unit_id, unit):\r\n    \"\"\"Get the display name for a given valid unit_id and unit.\"\"\"\r\n    unit = unit.lower()\r\n    return UNIT_FORMAT_DB[unit_id]['conversions'][unit][0]\r\n\r\ndef is_valid_unit_id(unit_id):\r\n    \"\"\"Check if a unit_id is valid.\"\"\"\r\n    return unit_id in UNIT_FORMAT_DB\r\n\r\ndef is_valid_unit(unit_id, unit):\r\n    \"\"\"Check if a unit is a valid unit for a valid unit_id.\"\"\"\r\n    unit = unit.lower()\r\n    return unit in UNIT_FORMAT_DB[unit_id]['conversions']\r\n\r\ndef get_default_unit(unit_id):\r\n    \"\"\"Return the default unit for a unit_id.\"\"\"\r\n    if unit_id is None:\r\n        return None\r\n    return UNIT_FORMAT_DB[unit_id]['default']\r\n\r\ndef display_search_unit(unit_id):\r\n    \"\"\"Check if a unit name should be displayed for a unit_id on the\r\n
    Search tab.\"\"\"\r\n    if not unit_id:\r\n        return False\r\n    return UNIT_FORMAT_DB[unit_id]['display_search']\r\n\r\ndef display_result_unit(unit_id):\r\n    \"\"\"Check if a unit name should be displayed for a unit_id for results.\"\"\"\r\n    if not unit_id:\r\n        return False\r\n    return UNIT_FORMAT_DB[unit_id]['display_result']\r\n\r\ndef display_unit_ever(unit_id):\r\n    \"\"\"Check if a unit name should ever be displayed for a unit_id.\"\"\"\r\n    return display_search_unit(unit_id) or display_result_unit(unit_id)\r\n\r\ndef get_disp_default_and_avail_units(param_form_type):\r\n    \"\"\"Return display, default, and available units for a given ParamInfo form type.\"\"\"\r\n    (form_type, form_type_format,\r\n     form_type_unit_id) = parse_form_type(param_form_type)\r\n\r\n    is_displayed = display_result_unit(form_type_unit_id)\r\n    if not is_displayed:\r\n        return None, None, None\r\n\r\n    available_units = get_unit_display_names(form_type_unit_id)\r\n    default_unit = get_default_unit(form_type_unit_id)\r\n    disp_unit = get_unit_display_name(form_type_unit_id, default_unit)\r\n    return disp_unit, default_unit, available_units\r\n\r\n### FORMAT A VALUE FOR A GIVEN UNIT\r\n\r\ndef adjust_format_string_for_units(numerical_format, unit_id, unit):\r\n    \"\"\"Adjust a format string size for a change of units.\r\n\r\n    This takes a format string of the form \".Nf\" and adjusts the value\r\n    of N based on the ratio of the given unit to the default unit.\r\n    If the format string is anything else, it is left unchanged.\r\n    \"\"\"\r\n    if unit_id is None:\r\n        return numerical_format\r\n    if (not numerical_format.startswith('.') or\r\n        not numerical_format.endswith('f')):\r\n        return numerical_format\r\n    unit = unit.lower()\r\n    default_unit = UNIT_FORMAT_DB[unit_id]['default']\r\n    if default_unit == unit:\r\n        return numerical_format\r\n    # ceil rounds positive numbers up (adding decimal places) and rounds\r\n    # negative numbers toward zero (removing fewer decimal places). In both\r\n    # cases we're being conservative - adding too many decimal places or\r\n    # removing too few.\r\n    factor = int(np.ceil(np.log10(\r\n                UNIT_FORMAT_DB[unit_id]['conversions'][unit][1])))\r\n    dec = max(int(numerical_format[1:-1]) + factor, 0)\r\n    return '.' + str(dec) + 'f'\r\n\r\ndef format_unit_value(val, numerical_format, unit_id, unit,\r\n                      keep_trailing_zeros=False, convert_from_default=True):\r\n    \"\"\"Format a value based on the unit_id and specific unit.\r\n\r\n    val                 The value to be formatted.\r\n    numerical_format    A string like \".5f\" that specifies the numerical format to be\r\n                        used if this unit system does not include a formatting\r\n                        function. The number of decimal places will be adjusted, as\r\n                        appropriate, based on the units requested.\r\n    unit_id             The id of the unit system.\r\n    unit                The requested output unit. None means use the default unit.\r\n    keep_trailing_zeros If True, keep the zeros at the end of a decimal floating point\r\n                        number (e.g. 
2.1000).\r\n convert_from_default If True, convert the value from the default unit to the\r\n requested unit.\r\n \"\"\"\r\n if val is None or isinstance(val, str):\r\n return val\r\n format_func = None\r\n if unit_id is not None:\r\n if unit is None:\r\n unit = get_default_unit(unit_id)\r\n unit = unit.lower()\r\n if convert_from_default:\r\n val = convert_from_default_unit(val, unit_id, unit)\r\n format_func = UNIT_FORMAT_DB[unit_id]['conversions'][unit][3]\r\n if format_func is None:\r\n if numerical_format is None:\r\n return str(val)\r\n if abs(val) >= 1e8:\r\n numerical_format = numerical_format.replace('f', 'e')\r\n new_format = adjust_format_string_for_units(numerical_format,\r\n unit_id, unit)\r\n ret = ('{:'+new_format+'}').format(val)\r\n if not keep_trailing_zeros:\r\n ret = _strip_trailing_zeros(ret)\r\n return ret\r\n return format_func(val, unit_id=unit_id, unit=unit,\r\n numerical_format=numerical_format,\r\n keep_trailing_zeros=keep_trailing_zeros)\r\n\r\ndef _strip_trailing_zeros(s):\r\n \"\"\"Strip meaningless trailing zeros (like after a decimal point).\"\"\"\r\n if re.fullmatch(r'.*\\.\\d*0*', s):\r\n # Strip trailing .000s from NNN.DDDZZZ\r\n s = s.rstrip('0').rstrip('.')\r\n elif re.fullmatch(r'.*\\.\\d*0*e[+-]\\d+', s):\r\n # Strip trailing .000s from the mantissa part of NNN.DDDZZZe+EEE\r\n s1, s2 = s.split('e')\r\n s1 = s1.rstrip('0').rstrip('.')\r\n s = s1+'e'+s2\r\n return s\r\n\r\ndef _clean_numeric_field(s, compress_spaces=True):\r\n \"\"\"Remove useless characters like , or _ from a string.\"\"\"\r\n ret = s.lower().replace(',', '').replace('_','')\r\n if compress_spaces:\r\n ret = ret.replace(' ', '')\r\n return ret\r\n\r\ndef parse_unit_value(s, numerical_format, unit_id, unit):\r\n \"\"\"Parse a string given the unit and numerical format.\r\n\r\n We assume that the value returned should be in the given unit, so\r\n normally there is no conversion done. However, if the user explicitly\r\n specifies a unit, like \"1 km\", then we convert from that unit to the\r\n passed-in unit.\"\"\"\r\n if s is None or s == '':\r\n return None\r\n parse_func = None\r\n if unit_id is not None:\r\n if unit is None:\r\n unit = get_default_unit(unit_id)\r\n unit = unit.lower()\r\n (display_name, conversion_factor, parse_func,\r\n display_func, _) = UNIT_FORMAT_DB[unit_id]['conversions'][unit]\r\n if parse_func is None:\r\n # Direct numeric conversion with no special parsing\r\n # Choose between float or int parsing\r\n parse_func = float\r\n if numerical_format and numerical_format[-1] == 'd':\r\n parse_func = int\r\n\r\n # Clean the string, including converting to lower case and eliminating\r\n # spaces\r\n s = _clean_numeric_field(s)\r\n force_unit = None\r\n if unit_id:\r\n # Look for an overriding unit name suffix, like \"1 km\"\r\n conversions = UNIT_FORMAT_DB[unit_id]['conversions']\r\n # Build a list of all possible suffixes. 
Sort the possible suffixes\r\n # by descending length so that we find, for example, \"km\" before \"m\"\r\n sorted_suffixes = []\r\n for trial_unit, trial_conversion in conversions.items():\r\n trial_suffix_list = trial_conversion[4]\r\n for suffix in trial_suffix_list:\r\n sorted_suffixes.append((suffix, trial_unit, trial_conversion))\r\n sorted_suffixes.sort(key=lambda x: -len(x[0]))\r\n for trial_suffix, trial_unit, trial_conversion in sorted_suffixes:\r\n if s.endswith(trial_suffix):\r\n force_unit = trial_unit\r\n # Strip off the unit name from the number\r\n s = s[:-len(trial_suffix)]\r\n break\r\n ret = parse_func(s) # Parse the int or float\r\n if not math.isfinite(ret):\r\n raise ValueError\r\n if force_unit is not None:\r\n ret = convert_to_default_unit(ret, unit_id, force_unit)\r\n ret = convert_from_default_unit(ret, unit_id, unit)\r\n return ret\r\n\r\n # We only adjust for the conversion factor for non-standard parsers, because\r\n # those are ones that might specify an explicit unit (like \"1d\" for radians)\r\n # but we wouldn't have caught it as part of the generic numeric processing\r\n # above\r\n return parse_func(s, conversion_factor=conversion_factor,\r\n numerical_format=numerical_format,\r\n unit_id=unit_id, unit=unit)\r\n\r\ndef parse_form_type(s):\r\n \"\"\"Parse the ParamInfo FORM_TYPE with its subfields.\r\n\r\n Subfields are:\r\n TYPE[%format][:unit]\r\n \"\"\"\r\n if s is None:\r\n return None, None, None\r\n\r\n form_type = s\r\n form_type_format = None\r\n form_type_unit = None\r\n\r\n if form_type.find(':') != -1:\r\n form_type, form_type_unit = form_type.split(':')\r\n if form_type.find('%') != -1:\r\n form_type, form_type_format = form_type.split('%')\r\n\r\n return form_type, form_type_format, form_type_unit\r\n\r\ndef get_single_parse_function(unit_id):\r\n \"\"\"Return the parse func for a unit_id with a single non-displayed unit.\"\"\"\r\n parse_func = None\r\n if unit_id and not display_unit_ever(unit_id):\r\n default_unit = get_default_unit(unit_id)\r\n parse_func = (UNIT_FORMAT_DB[unit_id]['conversions'][default_unit][2])\r\n return parse_func\r\n\r\ndef get_single_format_function(unit_id):\r\n \"\"\"Return the format func for a unit_id with a single non-displayed unit.\"\"\"\r\n format_func = None\r\n if unit_id and not display_unit_ever(unit_id):\r\n default_unit = get_default_unit(unit_id)\r\n format_func = (UNIT_FORMAT_DB[unit_id]['conversions'][default_unit][3])\r\n return format_func\r\n\r\n\r\nclass UnitConversionTests(unittest.TestCase):\r\n def test_cvt_to_default_unit(self):\r\n \"\"\"Test convert_to_default_unit\"\"\"\r\n with self.assertRaises(KeyError):\r\n convert_to_default_unit(0, None, 'm')\r\n self.assertIsNone(convert_to_default_unit(None, 'x', 'y'))\r\n self.assertEqual(convert_to_default_unit(10, None, None), 10)\r\n self.assertEqual(convert_to_default_unit(100, 'duration', 'seconds'),\r\n 100)\r\n self.assertEqual(convert_to_default_unit(100, 'duration',\r\n 'milliseconds'), 0.1)\r\n with self.assertRaises(ValueError):\r\n convert_to_default_unit(1e307, 'duration', 'days')\r\n\r\n def test_cvt_from_default_unit(self):\r\n \"\"\"Test convert_from_default_unit\"\"\"\r\n with self.assertRaises(KeyError):\r\n convert_from_default_unit(0, None, 'm')\r\n self.assertIsNone(convert_from_default_unit(None, 'x', 'y'))\r\n self.assertEqual(convert_from_default_unit(10, None, None), 10)\r\n self.assertEqual(convert_from_default_unit(100, 'duration', 'seconds'),\r\n 100)\r\n self.assertEqual(convert_from_default_unit(100, 'duration',\r\n 
'milliseconds'), 100000)\r\n with self.assertRaises(ValueError):\r\n convert_from_default_unit(1e307, 'duration', 'milliseconds')\r\n\r\n def test_get_valid_units(self):\r\n \"\"\"Test get_valid_units\"\"\"\r\n self.assertIsNone(get_valid_units('fred'))\r\n self.assertEqual(get_valid_units('duration'),\r\n ['seconds', 'microseconds', 'milliseconds', 'minutes',\r\n 'hours', 'days'])\r\n\r\n def test_get_unit_display_names(self):\r\n \"\"\"Test get_unit_display_names\"\"\"\r\n self.assertIsNone(get_unit_display_names('fred'))\r\n self.assertEqual(get_unit_display_names('datetime'),\r\n {'ymdhms': 'YMDhms',\r\n 'ydhms': 'YDhms',\r\n 'jd': 'JD',\r\n 'jed': 'JED',\r\n 'mjd': 'MJD',\r\n 'mjed': 'MJED',\r\n 'et': 'SPICE ET'})\r\n\r\n def test_get_unit_display_name(self):\r\n \"\"\"Test get_unit_display_name\"\"\"\r\n self.assertEqual(get_unit_display_name('latitude', 'dms'), 'DMS')\r\n\r\n def test_is_valid_unit_id(self):\r\n \"\"\"Test is_valid_unit_id\"\"\"\r\n self.assertFalse(is_valid_unit_id('fred'))\r\n self.assertTrue(is_valid_unit_id('generic_angle'))\r\n\r\n def test_is_valid_unit(self):\r\n \"\"\"Test is_valid_unit\"\"\"\r\n self.assertFalse(is_valid_unit('generic_angle', 'fred'))\r\n self.assertTrue(is_valid_unit('generic_angle', 'radians'))\r\n\r\n def test_get_default_unit(self):\r\n \"\"\"Test get_default_unit\"\"\"\r\n self.assertFalse(get_default_unit(None))\r\n self.assertEqual(get_default_unit('longitude'), 'degrees')\r\n\r\n def test_display_search_unit(self):\r\n \"\"\"Test display_search_unit\"\"\"\r\n self.assertFalse(display_search_unit(None))\r\n self.assertTrue(display_search_unit('longitude'))\r\n self.assertFalse(display_search_unit('range_cassini_rev_no'))\r\n\r\n def test_display_result_unit(self):\r\n \"\"\"Test display_result_unit\"\"\"\r\n self.assertFalse(display_result_unit(None))\r\n self.assertTrue(display_result_unit('longitude'))\r\n self.assertFalse(display_result_unit('range_cassini_rev_no'))\r\n\r\n def test_display_unit_ever(self):\r\n \"\"\"Test display_unit_ever\"\"\"\r\n self.assertFalse(display_unit_ever(None))\r\n self.assertTrue(display_unit_ever('longitude'))\r\n self.assertFalse(display_unit_ever('range_cassini_rev_no'))\r\n\r\n def test_disp_default_and_avail_units(self):\r\n \"\"\"Test get_disp_default_and_avail_units\"\"\"\r\n self.assertEqual(get_disp_default_and_avail_units(\r\n '%d:range_cassini_rev_no'), (None, None, None))\r\n self.assertEqual(get_disp_default_and_avail_units('%d:wavenumber'),\r\n ('cm^-1', '1_cm', {'1_cm': 'cm^-1', '1_m': 'm^-1'}))\r\n\r\n def test_adjust_format_string_for_units(self):\r\n \"\"\"Test adjust_format_string_for_units\"\"\"\r\n self.assertEqual(adjust_format_string_for_units('.6f', None, None),\r\n '.6f')\r\n self.assertEqual(adjust_format_string_for_units(\r\n '6d', 'wavenumber', '1_m'), '6d')\r\n self.assertEqual(adjust_format_string_for_units(\r\n '.6f', 'wavenumber', '1_M'), '.4f')\r\n self.assertEqual(adjust_format_string_for_units(\r\n '.6f', 'duration', 'days'), '.11f')\r\n self.assertEqual(adjust_format_string_for_units(\r\n '.6f', 'duration', 'seconds'), '.6f')\r\n\r\n def test_format_unit_value(self):\r\n \"\"\"Test format_unit_value\"\"\"\r\n self.assertEqual(format_unit_value('string', None, None, None),\r\n 'string')\r\n self.assertIsNone(format_unit_value(None, None, 'datetime', 'ydhms'))\r\n self.assertEqual(format_unit_value(600000000, None, 'datetime',\r\n 'ydhms'),\r\n '2019-005T10:39:23.000')\r\n self.assertEqual(format_unit_value(600000000, None, 'datetime',\r\n 'ymdhms'),\r\n 
'2019-01-05T10:39:23.000')\r\n self.assertEqual(format_unit_value(100.1, '.3f', 'duration', 'seconds'),\r\n '100.1')\r\n self.assertEqual(format_unit_value(100.1, '.3f', None, None),\r\n '100.1')\r\n self.assertEqual(format_unit_value(100.1, '.3f', 'duration', None),\r\n '100.1')\r\n self.assertEqual(format_unit_value(100.1, '.3f', 'duration', 'seconds',\r\n keep_trailing_zeros=True),\r\n '100.100')\r\n self.assertEqual(format_unit_value(100.1, '.3f', 'duration', 'days',\r\n keep_trailing_zeros=True),\r\n '0.00115856')\r\n self.assertEqual(format_unit_value(100.1, '.3f', 'duration', 'days',\r\n keep_trailing_zeros=True,\r\n convert_from_default=False),\r\n '100.10000000')\r\n self.assertEqual(format_unit_value(100, None, 'duration', 'seconds'),\r\n '100')\r\n self.assertEqual(format_unit_value(1e7, '.3f', 'duration', 'seconds'),\r\n '10000000')\r\n self.assertEqual(format_unit_value(1e8, '.3f', 'duration', 'seconds'),\r\n '1e+08')\r\n\r\n def test_parse_unit_value(self):\r\n \"\"\"Test parse_unit_value.\"\"\"\r\n self.assertIsNone(parse_unit_value(None, 'x', 'x', 'x'))\r\n self.assertIsNone(parse_unit_value('', 'x', 'x', 'x'))\r\n self.assertEqual(parse_unit_value('100', '.3f', 'duration', None), 100)\r\n val = parse_unit_value('100000', '.3f', 'duration', 'milliseconds')\r\n self.assertIsInstance(val, float)\r\n self.assertEqual(val, 100000)\r\n val = parse_unit_value('100s', '.3f', 'duration', 'milliseconds')\r\n self.assertIsInstance(val, float)\r\n self.assertEqual(val, 100000)\r\n val = parse_unit_value('100', 'd', None, None)\r\n self.assertIsInstance(val, int)\r\n self.assertEqual(val, 100)\r\n with self.assertRaises(ValueError):\r\n parse_unit_value('inf', '.3f', 'duration', 'milliseconds')\r\n self.assertEqual(parse_unit_value('2019-01-05T10:39:23.000', None, 'datetime',\r\n 'YMDhms'), 600000000)\r\n\r\n def test_parse_form_type(self):\r\n \"\"\"Test parse_form_type.\"\"\"\r\n self.assertEqual(parse_form_type(None), (None, None, None))\r\n self.assertEqual(parse_form_type('X'), ('X', None, None))\r\n self.assertEqual(parse_form_type('X:Y'), ('X', None, 'Y'))\r\n self.assertEqual(parse_form_type('X%Z'), ('X', 'Z', None))\r\n self.assertEqual(parse_form_type('X%Z:Y'), ('X', 'Z', 'Y'))\r\n\r\n def test_get_single_parse_function(self):\r\n \"\"\"Test get_single_parse_function.\"\"\"\r\n self.assertIsNone(get_single_parse_function(None))\r\n self.assertIsNone(get_single_parse_function('datetime'))\r\n self.assertEqual(get_single_parse_function('range_cassini_rev_no'),\r\n parse_cassini_orbit)\r\n\r\n def test_get_single_format_function(self):\r\n \"\"\"Test get_single_format_function.\"\"\"\r\n self.assertIsNone(get_single_format_function(None))\r\n self.assertIsNone(get_single_format_function('datetime'))\r\n self.assertEqual(get_single_format_function('range_cassini_rev_no'),\r\n format_cassini_orbit)\r\n\r\n\r\nif __name__ == '__main__': # pragma: no cover\r\n unittest.main()\r\n","sub_path":"lib/opus_support.py","file_name":"opus_support.py","file_ext":"py","file_size_in_byte":99718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
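A quick usage sketch for the unit helpers in the record above (hypothetical: it assumes the record's full module is importable as opus_support, and the values simply mirror its own unit tests):

from opus_support import (convert_to_default_unit, convert_from_default_unit,
                          format_unit_value, parse_unit_value)

# 100,000 ms -> 100 s (the default 'duration' unit), and back again
assert convert_to_default_unit(100000, 'duration', 'milliseconds') == 100
assert convert_from_default_unit(100, 'duration', 'milliseconds') == 100000

# Parsing honors an explicit unit suffix ("100s") and converts to the requested unit
assert parse_unit_value('100s', '.3f', 'duration', 'milliseconds') == 100000

# Formatting strips meaningless trailing zeros by default
assert format_unit_value(100.1, '.3f', 'duration', 'seconds') == '100.1'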
+{"seq_id":"403194762","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport sqlite3\nfrom pprint import pprint\n\nconn = sqlite3.connect('') # filepath to sqlite file\nc = conn.cursor()\n\nwith open('') as f: # filepath to JSON file\n dataset = json.load(f)\n\nfor data in dataset['']: # dataset is a node to iterate\n print(f'INSERT INTO ') # raw query on f string format\n try:\n c.execute(f'INSERT INTO ') # raw query on f string format\n except sqlite3.IntegrityError as e:\n print('sqlite error: ', e.args[0])\n conn.commit()\n\nconn.close()\n","sub_path":"jsonimporter.py","file_name":"jsonimporter.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"521907526","text":"import pandas as pd\nfrom sys import argv\nfrom pathlib import Path\n\nfilename = Path(argv[1])\n\ndata = pd.read_excel(filename)\na = data.iloc[:, 0]\nb = data.iloc[:, 1]\na_jian_b = set(a.unique()) - set(b.unique())\nout = pd.DataFrame(a_jian_b)\nout.to_excel(filename.with_name('chaji.xlx'))\n","sub_path":"a_jian_b.py","file_name":"a_jian_b.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"391539539","text":"#mdc\n\nnum1 = num3 = int(input(\"Numero 01: \"))\nnum2 = num4 = int(input(\"Numero 02: \"))\n\nmdc = 0\n#\n\n#resto = None\nresto = 1\n#while resto is not 0:\nwhile resto != 0:\n resto = num1 % num2\n num1 = num2\n num2 = resto\n mdc = num1\n\nprint(\"O MDC (%d,%d) = %d\" %(num3, num4, mdc))\n\n","sub_path":"estruturas repeticao/codigos/q15.py","file_name":"q15.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"109048185","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport json\nimport random as rnd\nfrom requests import put, get\nimport numpy as np\n\n# SETUP:\nBASE_URL = \"http://localhost:8080\" # Server location\nid = 4 # Experiment ID of the persuasive sample experiment\nkey= \"3b6d015550\" # Key\n\nN=100 # Number of calls\n\nuserid = 12 # Id of user\nweather = [\"sunny\", \"rainy\"] # Possible values for weather\n\n\n\nfor i in range(N):\n \n # Get the action (and print the call and the result): \n currentweather = rnd.choice(weather)\n url = \"{}/{}/getAction.json?key={}&context={}\".format(BASE_URL,id,key,json.dumps({\"weather\":currentweather,\"userid\":userid})) \n print(url) \n result = get(url)\n print(result.text)\n \n jsonobj = json.loads(result.text)\n action = float(jsonobj[\"action\"][\"distance\"]) \n \n # Set reward\n actualkm = action * 1.2 # This user overshoots his goal all the time\n url = \"{}/{}/setReward.json?key={}&reward={}&context={}\".format(BASE_URL,id,key,json.dumps({\"km\":actualkm}),json.dumps({\"weather\":currentweather,\"userid\":userid})) \n print(url) \n result = get(url)\n print(result.text)\n","sub_path":"utils/agents/persuasive2016.py","file_name":"persuasive2016.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"332430951","text":"from peewee import *\n\n# Configure your database connection here\n# database name = should be your username on your laptop\n# database user = should be your username on your laptop\ndb = PostgresqlDatabase('postgres', user='levente', password='postgres')\n\n\nclass BaseModel(Model):\n \"\"\"A base model that will use our Postgresql database\"\"\"\n class Meta:\n database = db #nothing special happened here, DB reached i guess\n\n\nclass CodecoolClass(BaseModel):\n # setting up Model attributes\n\tlocation = CharField()\n\tyear = IntegerField()\n\n # Functions returning student + mentors\n\tdef mentors(self):\n\t\treturn self.mentor\n\n\tdef students(self):\n\t\treturn self.student\n\n\n# Building the models for the test\nclass Person(BaseModel):\n first_name = CharField()\n last_name = CharField()\n year_of_birth = DateField()\n gender = CharField()\n codecool_class = ForeignKeyField(CodecoolClass, related_name='person') # Setting up ForeignKey\n\n\n# getting bored now...\nclass Mentor(BaseModel):\n nickname = CharField()\n first_name = CharField()\n last_name = CharField()\n year_of_birth = DateField()\n gender = CharField()\n codecool_class = ForeignKeyField(CodecoolClass, related_name='mentor') # related_name always == class name\n\n\n# OH YEAH NEW FIELDS!!44!44 :))\nclass Student(BaseModel):\n knowledge_level = IntegerField()\n energy_level = IntegerField()\n first_name = CharField()\n last_name = CharField()\n year_of_birth = DateField()\n gender = CharField()\n codecool_class = ForeignKeyField(CodecoolClass, related_name='student')\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"456063993","text":"import random\n\n\ndef deste_olustur():\n deste_listesi = []\n for i in kart_turleri:\n for j in kart_degerleri:\n deste_listesi.append([i, j])\n return deste_listesi\n\ndeste = deste_olustur()\n\n\ndef kart_sil(kart): # desteden seçilen kartı silmek için\n return deste.remove(kart)\n\n\ndef oyuncunun_kartlari():\n kart1 = random.choice(deste)\n kart_sil(kart1)\n kart2 = random.choice(deste)\n kart_sil(kart2)\n return kart1, kart2\n\n\ndef dagiticinin_kartlari():\n acik_kart = random.choice(deste)\n kart_sil(acik_kart)\n kapali_kart = random.choice(deste)\n kart_sil(kapali_kart)\n return acik_kart, kapali_kart\n\n\ndef kart_cekme(): # oyuncu kart çekmek istediğinde kart çeker\n cekilen_kart = random.choice(deste)\n kart_sil(cekilen_kart)\n return cekilen_kart\n\n\ndef dagitici_kart_cekme(): # dağıtıcının kart çekmesi gerektiğinde kart çeker\n dagitici_kart = random.choice(deste)\n kart_sil(dagitici_kart)\n return dagitici_kart\n\n\ndef dagitici_kart_toplamlari(dgt_kartlar):\n toplam = 0\n for i in range(len(dgt_kartlar)):\n if dgt_kartlar[i][1] == \"Vale\" or dgt_kartlar[i][1] == \"Kız\" or dgt_kartlar[i][1] == \"Papaz\":\n dgt_kartlar[i][1] = 10\n if dgt_kartlar[i][1] == \"As\":\n dgt_kartlar[i][1] = 11\n for j in range(len(dgt_kartlar)):\n if toplam > 21 and dgt_kartlar[j][1] == 11:\n dgt_kartlar[j][1] = 1\n toplam += dgt_kartlar[j][1]\n return toplam\n\n\ndef oyuncu_kart_toplamlari(oyn_kartlar):\n toplam, toplam11 = 0, 0\n for i in range(len(oyn_kartlar)):\n if oyn_kartlar[i][1] == \"Vale\" or oyn_kartlar[i][1] == \"Kız\" or oyn_kartlar[i][1] == \"Papaz\":\n oyn_kartlar[i][1] = 10\n if oyn_kartlar[i][1] == \"As\":\n oyn_kartlar[i][1] = 1\n toplam += oyn_kartlar[i][1]\n if oyn_kartlar[i][1] == 1:\n toplam11 = toplam + 10\n return [toplam, toplam11]\n\n\ndef blackjack_21():\n tekrar = \"e\"\n while tekrar.lower() == \"e\":\n dgt_kartlar = list(dagiticinin_kartlari()) # dağıtıcının kartları listesi\n oyn_kartlar = list(oyuncunun_kartlari()) # oyuncunun kartları listesi\n print(\"Dağıtıcının açık kartı {} {}\".format(dgt_kartlar[0][0], dgt_kartlar[0][1]))\n print(\"Kartlarınız {} {}, {} {} \".format(oyn_kartlar[0][0], oyn_kartlar[0][1], oyn_kartlar[1][0],\n oyn_kartlar[1][1]), end=\"\")\n if oyn_kartlar[0][1] == \"As\" or oyn_kartlar[1][1] == \"As\":\n print(\" (toplam {} ya da {}) \".format(oyuncu_kart_toplamlari(oyn_kartlar)[0],\n oyuncu_kart_toplamlari(oyn_kartlar)[1]))\n else:\n print(\" (toplam {})\".format(oyuncu_kart_toplamlari(oyn_kartlar)[0]))\n if oyuncu_kart_toplamlari(oyn_kartlar)[0] == 21 or oyuncu_kart_toplamlari(oyn_kartlar)[1] == 21:\n print(\"Blackjack! Sıra dağıtıcıda.\")\n print(\"Dağıtıcının kartları {} {}, {} {}\".format(dgt_kartlar[0][0], dgt_kartlar[0][1], dgt_kartlar[1][0],\n dgt_kartlar[1][1]), end=\"\")\n print(\" (toplam {})\".format(dagitici_kart_toplamlari(dgt_kartlar)))\n while dagitici_kart_toplamlari(dgt_kartlar) < 17:\n yeni_dgt_kart = dagitici_kart_cekme()\n dgt_kartlar.append(yeni_dgt_kart) # çekilen yeni kart dağıtıcının kartları listesine ekleniyor\n print(\"Dağıtıcının kartları {}\".format(dgt_kartlar), end=\"\")\n print(\" (toplam {})\".format(dagitici_kart_toplamlari(dgt_kartlar)))\n if dagitici_kart_toplamlari(dgt_kartlar) > 21:\n print(\"Dağıtıcı battı. Kazandınız!\")\n break\n elif dagitici_kart_toplamlari(dgt_kartlar) == 21:\n print(\"Dağıtıcı blackjack yaptı! Oyun berabere.\")\n tekrar = input(\"Tekrar oynamak istiyor musunuz? 
(e/h):\")\n harfler2 = [\"e\", \"E\", \"h\", \"H\"]\n while tekrar not in harfler2:\n tekrar = input(\"Tekrar oynamak istiyor musunuz? (e/h):\")\n else:\n print(\"Kazandınız!\")\n break\n else:\n ek_kart = input(\"Kart ya da Pas (k/p):\")\n harfler1 = [\"k\", \"K\", \"p\", \"P\"]\n while ek_kart not in harfler1:\n ek_kart = input(\"Kart ya da Pas (k/p):\")\n while ek_kart.lower() == \"k\":\n yeni_oyn_kart = kart_cekme()\n oyn_kartlar.append(yeni_oyn_kart)\n print(\"Çekilen kart {} {}\".format(yeni_oyn_kart[0], yeni_oyn_kart[1]), end=\"\")\n if yeni_oyn_kart[0][1] == \"As\":\n print(\" (toplam {} ya da {}) \".format(oyuncu_kart_toplamlari(oyn_kartlar)[0],\n oyuncu_kart_toplamlari(oyn_kartlar)[1]))\n else:\n print(\" (yeni toplam {})\".format(oyuncu_kart_toplamlari(oyn_kartlar)[0]))\n if oyuncu_kart_toplamlari(oyn_kartlar)[0] > 21 or oyuncu_kart_toplamlari(oyn_kartlar)[1] > 21:\n # oyuncu_kart_toplamlari fonksiyonu iki toplam döndürdüğü için böyle yapıldı\n print(\"Battınız. Dağıtıcı kazandı.\")\n break\n elif oyuncu_kart_toplamlari(oyn_kartlar)[0] == 21 or oyuncu_kart_toplamlari(oyn_kartlar)[1] == 21:\n print(\"Blackjack! Sıra dağıtıcıda.\")\n print(\"Dağıtıcının kartları {} {}, {} {}\".format(dgt_kartlar[0][0], dgt_kartlar[0][1],\n dgt_kartlar[1][0], dgt_kartlar[1], [1]), end=\"\")\n print(\" (toplam {})\".format(dagitici_kart_toplamlari(dgt_kartlar)))\n while dagitici_kart_toplamlari(dgt_kartlar) < 17:\n yeni_dgt_kart = dagitici_kart_cekme()\n dgt_kartlar.append(yeni_dgt_kart)\n print(\"Dağıtıcının kartları {}\".format(dgt_kartlar), end=\"\")\n print(\" (toplam {})\".format(dagitici_kart_toplamlari(dgt_kartlar)))\n if dagitici_kart_toplamlari(dgt_kartlar) > 21:\n print(\"Dağıtıcı battı. Kazandınız!\")\n break\n elif dagitici_kart_toplamlari(dgt_kartlar) == 21:\n print(\"Dağıtıcı blackjack yaptı! Oyun berabere.\")\n break\n else:\n print(\"Kazandınız!\")\n break\n else:\n ek_kart = input(\"Kart ya da Pas (k/p):\")\n while ek_kart.lower() == \"p\":\n print(\"Sıra dağıtıcıda.\")\n print(\"Dağıtıcının kartları {} {}, {} {}\".format(dgt_kartlar[0][0], dgt_kartlar[0][1],\n dgt_kartlar[1][0], dgt_kartlar[1], [1]), end=\"\")\n print(\" (toplam {})\".format(dagitici_kart_toplamlari(dgt_kartlar)))\n while dagitici_kart_toplamlari(dgt_kartlar) < 17:\n yeni_dgt_kart = dagitici_kart_cekme()\n dgt_kartlar.append(yeni_dgt_kart)\n print(\"Dağıtıcının kartları {}\".format(dgt_kartlar), end=\"\")\n print(\" (toplam {})\".format(dagitici_kart_toplamlari(dgt_kartlar)))\n if dagitici_kart_toplamlari(dgt_kartlar) > 21:\n print(\"Dağıtıcı battı. Kazandınız!\")\n break\n elif dagitici_kart_toplamlari(dgt_kartlar) == 21:\n if oyuncu_kart_toplamlari(oyn_kartlar)[0] == 21 or oyuncu_kart_toplamlari(oyn_kartlar)[1] == 21:\n print(\"Dağıtıcı blackjack yaptı! Oyun berabere.\")\n break\n else:\n print(\"Dağıtıcı blackjack yaptı! Kaybettiniz.\")\n break\n elif dagitici_kart_toplamlari(dgt_kartlar) >= 17:\n if oyuncu_kart_toplamlari(oyn_kartlar)[0] > dagitici_kart_toplamlari(dgt_kartlar) or \\\n oyuncu_kart_toplamlari(oyn_kartlar)[1] > dagitici_kart_toplamlari(dgt_kartlar):\n print(\"Kazandınız!\")\n break\n elif (oyuncu_kart_toplamlari(oyn_kartlar)[0] or oyuncu_kart_toplamlari(oyn_kartlar)[1]) == \\\n dagitici_kart_toplamlari(dgt_kartlar):\n print(\"Oyun berabere.\")\n break\n else:\n print(\"Dağıtıcı kazandı.\")\n break\n tekrar = input(\"Tekrar oynamak istiyor musunuz? (e/h):\")\n harfler2 = [\"e\", \"E\", \"h\", \"H\"]\n while tekrar not in harfler2:\n tekrar = input(\"Tekrar oynamak istiyor musunuz? 
(e/h):\")\n print(\"\")\n\nblackjack_21()\n","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":9079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"641026724","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nG=nx.DiGraph()\n\nres=int(input(\"Enter the target concentration value .. \\n\"))\nval=[float(i) for i in range(0,1025)]\n#Populating the array with values\ncount=0\nlow=float(input(\"Enter the buffer concentration value .. \\n\"))\nhigh=float(input(\"Enter the sample concentration value .. \\n\"))\n#Setting the buffer and solute sample conc and counter\nl=input(\"Enter the lower bound numbers followed by spaces\\n\")\nlb=list(map(int,l.split()))\n\nh=input(\"Enter the upper bound numbers followed by spaces\\n\")\nub=list(map(int,h.split()))\n\n\nfor i in range(0,len(ub)):\n mid=((ub[i]+lb[i])/2)\n count=count+1\n w=input(\"Enter the weight of the graph \\t\")\n G.add_edge(ub[i],mid,weight=w,color='b')\n G.add_edge(lb[i],mid,weight=w,color='g')\n \n #FOR GRAPH COLORING\n pos = nx.circular_layout(G)\n edges = G.edges()\n colors = [G[u][v]['color'] for u,v in edges]\n nx.draw(G, pos=pos, edges=edges, edge_color=colors,with_labels=True,width=1,font_family='sans-serif')\n plt.figure()\n plt.show()\n \n #FOR GRAPH COLORING\n \n print(\"The target is\",mid,\"in\",count,\"number of steps \\n\")\n\np=G.nodes()\nfor i in p:\n if((G.out_degree(i)-G.in_degree(i))%2!=0 and (-(G.in_degree(i))and G.out_degree(i))):\n print(\"Waste found at position\",i)\n e=int(input(\"Enter the percentage of error: \\t\"))\n \n e1=1+e/100\n e2=1-e/100\n w1=list(G.successors(i))\n #print(w1)\n w2=list(G.predecessors(w1[0]))\n #print(w2)\n w2.remove(i)\n #print(w2)\n pe=((i*e1)+(w2[0]*1))/(1+e1)\n ne=((i*e2)+(w2[0]*1))/(1+e2)\n \n print(\"Possitive error: \\t \",pe)\n print(\"Negative error: \\t\",ne)\n \nnx.draw_circular(G,with_labels=True)","sub_path":"Codes/idma-digraph-error.py","file_name":"idma-digraph-error.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"92195482","text":"from datetime import datetime\nfrom threading import Thread\nfrom time import sleep\nimport datetime\nimport errno\nimport os\nimport re\n\n\ndef write_log(level, msg):\n cur_time = datetime.datetime.now()\n with open('./task.log', mode='a+', encoding='utf8') as file:\n s = \"[\" + str(cur_time) + \"][\" + level + \"] \" + msg\n file.write(s + \"\\n\")\n\n\nclass TaskTimer:\n __instance = None\n\n def __new__(cls, *args, **kwargs):\n # 单例模式\n if not cls.__instance:\n cls.__instance = object.__new__(cls)\n return cls.__instance\n\n def __init__(self):\n self.task_queue = []\n\n def timing_task(self, task):\n curTime = datetime.datetime.now()\n if task['day'] != 0:\n desTime = curTime.replace(day=curTime.day + task['day'], hour=0, minute=0, second=0)\n elif task['hour'] != 0:\n desTime = curTime.replace(day=curTime.day, hour=curTime.hour + task['hour'], minute=0, second=0)\n elif task['minute'] != 0:\n desTime = curTime.replace(day=curTime.day, hour=curTime.hour, minute=curTime.minute + task['minute'],\n second=0)\n else:\n write_log(\"异常\", \"参数输入错误:\" + task['fun'].__name__ + \" 已执行\")\n\n delta_sec = (desTime - curTime).total_seconds()\n print(\"Next minute must sleep %d seconds\" % delta_sec)\n sleep(delta_sec)\n try:\n task['fun'](*task['args'])\n write_log(\"正常\", \"定时任务:\" + task['fun'].__name__ + \" 已执行\")\n except Exception as e:\n write_log(\"异常\", \"定时任务:\" + task['fun'].__name__ + \" 函数内部异常:\" + str(e))\n print(e)\n\n def add_task(self, fun, args): # , day=0, hour=0, minute=0\n task = {\n 'fun': fun,\n 'args': args,\n 'day': args[0],\n 'hour': args[1],\n 'minute': args[2]\n }\n self.task_queue.append(task)\n\n def work(self):\n while True:\n for task in self.task_queue:\n self.timing_task(task)\n\n def start(self):\n thread = Thread(target=self.work)\n thread.start()\n\n\nbasedir_log = '../logs/'\n\n\ndef make_split_file(day=0, hour=0, minute=0):\n if day is not 0:\n log_dir = '%Y-%m-%d'\n now = datetime.datetime.now() - datetime.timedelta(days=1)\n elif hour is not 0:\n log_dir = '%Y-%m-%d_%H'\n now = datetime.datetime.now() - datetime.timedelta(hours=1)\n elif minute is not 0:\n log_dir = '%Y-%m-%d_%H-%M'\n now = datetime.datetime.now() - datetime.timedelta(minutes=1)\n else:\n return False\n\n log_filename_all = log_dir + '_all.log'\n log_level_info = '_info.log'\n log_level_warning = '_warning.log'\n log_level_error = '_error.log'\n dirc = basedir_log + now.strftime(log_dir)\n\n if os.path.exists(dirc):\n log_today_all = \"%s%s\" % (dirc + \"/\", now.strftime(log_filename_all))\n log_today_info = \"%s%s\" % (dirc + \"/\", now.strftime(log_dir + log_level_info))\n log_today_warning = \"%s%s\" % (dirc + \"/\", now.strftime(log_dir + log_level_warning))\n log_today_error = \"%s%s\" % (dirc + \"/\", now.strftime(log_dir + log_level_error))\n try:\n fd_info = os.open(log_today_info, os.O_CREAT | os.O_EXCL)\n fd_warning = os.open(log_today_warning, os.O_CREAT | os.O_EXCL)\n fd_error = os.open(log_today_error, os.O_CREAT | os.O_EXCL)\n # if coming here, the log file was created successfully\n os.close(fd_info)\n os.close(fd_warning)\n os.close(fd_error)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n # should not happen\n raise\n return log_today_all, log_today_info, log_today_warning, log_today_error\n\n else:\n write_log('error!!! 
no ' + dirc)\n return False\n\n\ndef split_logfile(day=0, hour=0, minute=0):\n file_path = make_split_file(day, hour, minute)\n print(file_path)\n if file_path:\n with open(file_path[1], 'w+') as f1, open(file_path[2], 'w+') as f2, open(file_path[3], 'w+') as f3:\n for line in open(file_path[0]): # \"../logs/2019-04-18_17-30/2019-04-18_17-30_all.log\"\n level = re.compile(\" \").sub('', line.split('-')[6])\n if level == 'INFO':\n f1.write(line)\n elif level == 'WARNING':\n f2.write(line)\n elif level == 'ERROR':\n f3.write(line)\n else:\n write_log('No such log !')\n\n\ntimer = TaskTimer()\ntimer.add_task(split_logfile, [0, 0, 1])\ntimer.start()\n","sub_path":"logtest02/config/Split_Log.py","file_name":"Split_Log.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
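A usage sketch for the timer above with a custom task (the heartbeat function is hypothetical; note that add_task passes the same [day, hour, minute] list both as the schedule and as the task's arguments, so the callable must accept those three values):

def heartbeat(day, hour, minute):
    # hypothetical task body: just record that the timer fired
    write_log("INFO", "heartbeat (schedule %d d / %d h / %d min)" % (day, hour, minute))

timer = TaskTimer()                    # singleton: always returns the same instance
timer.add_task(heartbeat, [0, 0, 5])   # fires roughly five minutes from now, then repeats
timer.start()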
+{"seq_id":"26131470","text":"import tornado.ioloop\nfrom tornado.web import RequestHandler, Application\n\n\nclass IndexHandler(RequestHandler):\n\n def get(self):\n self.write(\"Stay young stay simple.\")\n\n\ndef make_app():\n return Application([\n (r\"/\", IndexHandler),\n ])\n\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n","sub_path":"acolyte_test/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"642544318","text":"from threading import Thread\nimport time\n\n\nclass Error(Exception):\n pass\n\n\nclass Timeouted(Error):\n def __init__(self):\n pass\n\n\nclass Timer(Thread):\n def __init__(self, to):\n Thread.__init__(self)\n self.time = to\n self.done = 0\n\n def run(self):\n time.sleep(self.time)\n if self.done == 0:\n raise Timeouted\n\n def stop(self):\n self.done = 1\n\n\nclass ThreadController(Thread):\n @staticmethod\n def log_debug(*args):\n j = ''\n for i in args:\n j += str(i)\n print(\"[DEBUG] \" + str(j))\n\n def __init__(self):\n Thread.__init__(self)\n self.threads = []\n\n def run(self):\n self.watcher()\n\n def watcher(self):\n while True:\n time.sleep(1)\n p = 0\n for i in self.threads:\n if i.readiness == 1:\n self.threads.remove(i)\n i.join()\n del i\n p = p + 1\n","sub_path":"dermod/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"185355392","text":"from kspkerror import kspkerror\r\n\r\nclass ksp:\r\n def __init__(self):\r\n self.igrac1=None\r\n self.igrac2=None\r\n\r\n\r\n def play(self):\r\n self.igrac1=input('Igrač1 Unesite kamen, škare ili papir:')\r\n self.igrac2=input('Igrač2 Unesite kamen, škare ili papir:')\r\n\r\n if self.igrac1=='papir' and self.igrac2=='škare':\r\n print('igrac2 pobijedio')\r\n elif self.igrac1=='papir' and self.igrac2=='kamen':\r\n print('igrac1 pobijedio')\r\n elif self.igrac1=='papir' and self.igrac2=='papir':\r\n print('neriješeno')\r\n elif self.igrac1 =='škare' and self.igrac2=='škare':\r\n print('neriješeno')\r\n elif self.igrac1=='škare' and self.igrac2=='papir':\r\n print('igrac1 pobijedio')\r\n elif self.igrac1=='škare' and self.igrac2=='kamen': \r\n print('igrac2 pobijedio') \r\n elif self.igrac1=='kamen' and self.igrac2=='kamen':\r\n print('nerijšeno')\r\n elif self.igrac1=='kamen' and self.igrac2=='papir':\r\n print('igrac2 pobijedio')\r\n elif self.igrac1=='kamen' and self.igrac2=='škare':\r\n print('igrac1 pobijedio')\r\n else:\r\n raise kspkerror(101) \r\n\r\nif __name__ == \"__main__\":\r\n game = ksp()\r\n game.play()","sub_path":"Zadaca 3/kspkklase.py","file_name":"kspkklase.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"378904004","text":"# coding: utf-8\n\nimport codecs\nimport json\nimport pickle\nimport numpy as np\nfrom multiprocessing import Pool\n\nsave_path = './'\ny_file_test = 'y_test.npy'\ny_file_valid = 'y_valid.npy'\ny_file_train = 'y_train.npy'\n\nraw_file_test = \"data_test.json\"\nraw_file_valid = 'data_valid.json'\nraw_file_train = 'bigdata_delete_smalltestvalid.json'\n\nfile_fact_test_dict = 'idfact_dict_test.pkl'\nfile_fact_valid_dict = 'idfact_dict_valid.pkl'\nfile_fact_train_dict = 'idfact_dict_train.pkl'\n\n\ndef read_data(raw_file, file_y, file_fact_dict):\n i = 0\n facts_dict = {}\n\n f_lines = codecs.open(raw_file, 'r', 'utf-8').readlines()\n accusation_all = []\n relevant_articles_all = []\n death_penalty_all = []\n imprisonment_all = []\n life_imprisonment_all = []\n print(len(f_lines)) # 748203\n\n for line in f_lines:\n if i % 1000 == 0:\n print(i)\n # print(type(line)) # str\n case = json.loads(line)\n fact_str = case['fact']\n criminals = case['meta']['criminals']\n fact_str = fact_str.replace(criminals[0], ',')\n facts_dict[i] = fact_str\n\n # accu\n accusation = case['meta']['accusation']\n accusation_all.append(accusation)\n # arti\n relevant_articles = []\n for articles in case['meta']['relevant_articles']:\n relevant_articles.append(int(articles))\n relevant_articles_all.append(relevant_articles)\n death_penalty = case['meta']['term_of_imprisonment']['death_penalty']\n death_penalty_all.append(death_penalty)\n imprisonment = case['meta']['term_of_imprisonment']['imprisonment']\n imprisonment_all.append(imprisonment)\n life_imprisonment = case['meta']['term_of_imprisonment']['life_imprisonment']\n life_imprisonment_all.append(life_imprisonment)\n\n i += 1\n # if i == 50:\n # break\n # p = Pool()\n\n # accu\n accusation_all = np.asarray(accusation_all)\n # accu_id = np.asarray(list(p.map(get_id4accus, accusation_all)))\n print('accusation_all', accusation_all[0:5])\n\n # relevant_articles\n relevant_articles_all = np.asarray(relevant_articles_all)\n # rel_id = np.asarray(list(p.map(get_id4laws, relevant_articles_all)))\n print('relevant_articles_all ', relevant_articles_all[0:5])\n # death_penalty\n print('death_penalty_all', death_penalty_all[0:5])\n # imprisonment\n print('imprisonment_all', imprisonment_all[0:5])\n # life_imprisonment\n print('life_imprisonment_all', life_imprisonment_all[0:5])\n # save all\n labels = [accusation_all, relevant_articles_all, death_penalty_all, imprisonment_all, life_imprisonment_all]\n np.save(save_path+file_y, labels)\n print('save to ', save_path+file_y)\n\n print('save facts_dict', save_path + file_fact_dict)\n with open(save_path + file_fact_dict, 'wb') as outp:\n pickle.dump(facts_dict, outp)\n\n\nif __name__ == '__main__':\n # read_data(raw_file_valid, y_file_valid, file_fact_valid_dict)\n # read_data(raw_file_test, y_file_test, file_fact_test_dict)\n read_data(raw_file_train, y_file_train, file_fact_train_dict)\n\n\"\"\"\n\n\"\"\"","sub_path":"capsule_biblosa/models/text_classification/models/process_data/fact2dic_label.py","file_name":"fact2dic_label.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"554216639","text":"from app import app, database\nfrom flask import render_template, redirect, flash\nfrom .forms import NewDepositForm\nfrom datetime import date\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n deposits_due = database.get_due_deposits(next_x_days=60)\n today = date.today()\n return render_template('index.html', all_deposits=deposits_due, todays_date=today)\n\n\n@app.route('/newdeposit', methods=['GET', 'POST'])\ndef newdeposit():\n form = NewDepositForm()\n if form.validate_on_submit():\n flash('Bank=\"%s\", Account No=%s' %\n (form.bank.data, str(form.account_no.data)))\n\n database.add_deposit(bank=form.bank.data, account_no=form.account_no.data, account_type=form.account_type.data,\n rate_of_interest=form.rate_of_interest.data,\n date_of_investment=form.date_of_investment.data, due_date=form.due_date.data,\n invested_value=form.invested_value.data, investor=form.investor.data)\n return redirect('/deposits')\n return render_template('newdeposit.html', form=form)\n\n\n@app.route('/deposits')\ndef deposits():\n deposits_from_db = database.get_deposits()\n return render_template('deposits.html', all_deposits=deposits_from_db)\n\n\n@app.route('/deposits/')\ndef deposit_details(account_no):\n return render_template('deposit_details.html', account_no=account_no)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"487609832","text":"from setuptools import setup, find_packages\n\nversion = '0.1'\n\nsetup(\n name='ckanext-multilinguality',\n version=version,\n description=\"A CKAN extension that modifies recline and provides api calls for multilinguality purposes\",\n long_description=\"\"\"\\\n \"\"\",\n classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n keywords='ckan, multilinguality, preview',\n author='Me',\n author_email='',\n url='',\n license='AGPL',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n namespace_packages=['ckanext', 'ckanext.multilinguality'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n ],\n entry_points=\\\n \"\"\"\n [ckan.plugins]\n recline_multilinguality=ckanext.multilinguality.plugin:ReclinePreviewMultilinguality\n \"\"\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"409580102","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'skeletor.views.home', name='home'),\n # url(r'^skeletor/', include('skeletor.foo.urls')),\n\n url(r'^$', 'core.views.index'),\n url(r'^login$', 'django.contrib.auth.views.login', {'template_name':'login.html'}),\n url(r'^logout$', 'django.contrib.auth.views.logout_then_login', {'login_url':'/'}),\n url(r'^core/', include('core.urls')),\n url(r'^hr/', include('hr.urls')),\n url(r'^pm/', include('pm.urls')),\n #url(r'^track/', include('track.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n\n# Note: this only gets activated if DEBUG = True\nurlpatterns += staticfiles_urlpatterns()","sub_path":"people/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"581559560","text":"# Можно задавать много точек с источниками и давлениями. Можно задавать распределение давления в трещине. Если задавать только давления (граничные условия) то задача устойчива при любых шагах времени и координаты,\r\n# если задавать еще источники, то задача устойчива при каком-то соотношении t_step и hx, граничное условие-новое, градиента давления на границе равен 0.\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import interpolate\r\nfrom matplotlib import cm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n# если в скважинах задавать расход, а не давление, то задача НЕ всегда устойчива. Нужно подбирать шаги повремени и по координатам.\r\nif __name__ == '__main__':\r\n alpha = 0.8*10**-12\r\n beta = 0.17*10**-9\r\n hx = 0.01\r\n hy = 0.01\r\n hz = 0.07\r\n\r\n t_step = 0.01\r\n T_exp = 100\r\n Lx = 0.5\r\n Ly = 0.5\r\n\r\n N = int(Lx/hx) # количество ячеек вдоль оси х\r\n M = int(Ly/hy)\r\n print(N,M)\r\n\r\n #wells_with_Q = {}\r\n wells_with_P = {(int((Lx/2+0.121)/hx),int((Ly/2+0.121)/hy)): 20*10**5, (int((Lx/2-0.121)/hx), int((Ly/2-0.121)/hy)): 1*10**5}\r\n frac_with_P = {(int(N/2), int(M/2)):25*10**5, (int(N/2)-1, int(M/2)):24*10**5, (int(N/2)+1, int(M/2)):24*10**5, (int(N/2)-2, int(M/2)):23*10**5, (int(N/2)+2, int(M/2)):23*10**5, (int(N/2)-3, int(M/2)):22*10**5, (int(N/2)+3, int(M/2)):22*10**5, (int(N/2)+4, int(M/2)):21*10**5, (int(N/2)-4, int(M/2)):21*10**5}\r\n wells_with_Q = {(int((Lx/2)/hx),int((Ly/2-0.121)/hy)): -0.00000003}\r\n #wells_with_P = {(int((Lx/2+0.121)/hx),int((Ly/2+0.121)/hy)): 20*10**5, (int((Lx/2-0.121)/hx), int((Ly/2-0.121)/hy)): 1*10**5, (int((Lx/2-0.121)/hx), int((Ly/2)/hy)): 5*10**5, (int((Lx/2+0.121)/hx), int((Ly/2)/hy)): 5*10**5, (int((Lx/2)/hx), int((Ly/2-0.121)/hy)): 5*10**5, (int((Lx/2)/hx), int((Ly/2+0.121)/hy)): 5*10**5}\r\n #frac_with_P = {}\r\n\r\n Pres = 1*10**5 # давление в пласте\r\n\r\n V = hx*hy*hz\r\n coeff_1 = hx*hz/hy\r\n coeff_2 = hy*hz/hx\r\n Pres_distrib = np.ones((N, M)) * Pres\r\n\r\ndef PorePressure_in_Time(alpha, beta, t_step, N, M, wells_with_Q, wells_with_P, frac_with_P, Pres, V, coeff_1, coeff_2, Pres_distrib):\r\n # пластовое давление во всей области на нулевом временном шаге\r\n indic = []\r\n P_total = np.ones((N, 1)) * Pres\r\n for m in range(0,M):\r\n A = np.zeros((N,N))\r\n B = np.zeros((N,1))\r\n\r\n for n in range(1, N-1):\r\n A[n][n-1] = alpha*coeff_2\r\n A[n][n] = (-2*coeff_2*alpha - V*beta/t_step)\r\n A[n][n+1] = alpha*coeff_2\r\n\r\n A[0][0] = -2*coeff_2*alpha - V*beta/t_step\r\n A[0][1] = 2*alpha*coeff_2\r\n A[N-1][N-1] = A[0][0]\r\n A[N-1][N-2] = A[0][1]\r\n\r\n for n in range(0,N):\r\n if m == 0:\r\n B[n][0] = -V*beta/t_step*Pres_distrib[n][m]- alpha*coeff_1*(- 2*Pres_distrib[n][m] + 2*Pres_distrib[n][m+1])\r\n\r\n elif m == M-1:\r\n B[n][0] = -V*beta/t_step*Pres_distrib[n][m]- alpha*coeff_1*(2*Pres_distrib[n][m-1] - 2*Pres_distrib[n][m])\r\n\r\n else:\r\n B[n][0] = -V*beta/t_step*Pres_distrib[n][m]- alpha*coeff_1*(Pres_distrib[n][m-1] - 2*Pres_distrib[n][m] + Pres_distrib[n][m+1])\r\n\r\n for coord_key in wells_with_Q:\r\n if (n,m) == coord_key:\r\n B[n][0] = -V*beta/t_step*Pres_distrib[n][m]- alpha*coeff_1*(Pres_distrib[n][m-1] - 2*Pres_distrib[n][m] + Pres_distrib[n][m+1]) + wells_with_Q[coord_key]\r\n\r\n for n in range(0, N):\r\n\r\n for coord_key in wells_with_P:\r\n if (n,m) == coord_key:\r\n indic.append(coord_key)\r\n elif (n-1,m) == coord_key:\r\n A[n][n - 1] = 0\r\n B[n][0] = B[n][0] - alpha * coeff_2 * 
wells_with_P[coord_key]\r\n elif (n+1,m) == coord_key:\r\n A[n][n+1] = 0\r\n B[n][0] = B[n][0] - alpha * coeff_2 * wells_with_P[coord_key]\r\n\r\n for coord_key_fr in frac_with_P:\r\n if (n,m) == coord_key_fr:\r\n indic.append(coord_key_fr)\r\n elif (n-1,m) == coord_key_fr:\r\n A[n][n - 1] = 0\r\n B[n][0] = B[n][0] - alpha * coeff_2 * frac_with_P[coord_key_fr]\r\n elif (n+1,m) == coord_key_fr:\r\n A[n][n+1] = 0\r\n B[n][0] = B[n][0] - alpha * coeff_2 * frac_with_P[coord_key_fr]\r\n\r\n #print(type(indic))\r\n if indic != []:\r\n counter = 0\r\n indic.sort()\r\n for element in indic:\r\n A = np.delete(A, element[0]-counter, axis=0)\r\n A = np.delete(A, element[0]-counter, axis=1)\r\n B = np.delete(B, element[0]-counter)\r\n counter += 1\r\n\r\n\r\n P_new = np.linalg.solve(A,B)\r\n#\r\n if indic != []:\r\n\r\n for element in indic:\r\n if element in wells_with_P:\r\n P_new = np.insert(P_new,element[0],wells_with_P[element])\r\n else:\r\n P_new = np.insert(P_new, element[0], frac_with_P[element])\r\n\r\n indic = []\r\n P_new = P_new.reshape(N, 1)\r\n\r\n P_total = np.hstack((P_total,P_new))\r\n\r\n P_total = np.delete(P_total, 0, axis=1)\r\n\r\n Pres_distrib = np.array(P_total.copy())\r\n\r\n\r\n#---------------------------------------------------------------------------\r\n indic = []\r\n\r\n P_total = np.ones((1, M)) * Pres\r\n for n in range(0, N):\r\n A = np.zeros((M, M))\r\n B = np.zeros((M, 1))\r\n for m in range(1, M - 1):\r\n A[m][m - 1] = alpha * coeff_1\r\n A[m][m] = (-2 * coeff_1 * alpha - V * beta / t_step)\r\n A[m][m + 1] = alpha * coeff_1\r\n A[0][0] = -2 * coeff_1 * alpha - V * beta / t_step\r\n A[0][1] = 2*alpha * coeff_1\r\n A[M - 1][M - 1] = A[0][0]\r\n A[M - 1][M - 2] = A[0][1]\r\n\r\n for m in range(0, M):\r\n if n == 0:\r\n B[m][0] = -V * beta / t_step * Pres_distrib[n][m] - alpha * coeff_2 * (-2*Pres_distrib[n][m] + 2*Pres_distrib[n+1][m])\r\n\r\n elif n == N-1:\r\n B[m][0] = -V * beta / t_step * Pres_distrib[n][m] - alpha * coeff_2 * (-2*Pres_distrib[n][m] + 2 * Pres_distrib[n - 1][m])\r\n\r\n else:\r\n B[m][0] = -V * beta / t_step * Pres_distrib[n][m] - alpha * coeff_2 *(Pres_distrib[n-1][m] - 2 * Pres_distrib[n][m] + Pres_distrib[n+1][m])\r\n\r\n for coord_key in wells_with_Q:\r\n if (n, m) == coord_key:\r\n B[m][0] = -V * beta / t_step * Pres_distrib[n][m] - alpha * coeff_2 * (Pres_distrib[n][m - 1] - 2 * Pres_distrib[n][m] + Pres_distrib[n][m + 1]) + wells_with_Q[coord_key]\r\n\r\n\r\n for m in range(0, M):\r\n for coord_key in wells_with_P:\r\n if (n,m) == coord_key:\r\n indic.append(coord_key)\r\n\r\n elif (n,m-1) == coord_key:\r\n A[m][m - 1] = 0\r\n B[m][0] = B[m][0] - alpha * coeff_1 * wells_with_P[coord_key]\r\n\r\n elif (n,m+1) == coord_key:\r\n A[m][m + 1] = 0\r\n B[m][0] = B[m][0] - alpha * coeff_1 * wells_with_P[coord_key]\r\n\r\n for coord_key_fr in frac_with_P:\r\n if (n,m) == coord_key_fr:\r\n indic.append(coord_key_fr)\r\n elif (n,m-1) == coord_key_fr:\r\n A[m][m - 1] = 0\r\n B[m][0] = B[m][0] - alpha * coeff_1 * frac_with_P[coord_key_fr]\r\n elif (n,m+1) == coord_key_fr:\r\n A[m][m+1] = 0\r\n B[m][0] = B[m][0] - alpha * coeff_1 * frac_with_P[coord_key_fr]\r\n\r\n\r\n if indic != []:\r\n counter = 0\r\n indic.sort()\r\n for element in indic:\r\n A = np.delete(A, element[1]-counter, axis=0)\r\n A = np.delete(A, element[1]-counter, axis=1)\r\n B = np.delete(B, element[1]-counter)\r\n counter += 1\r\n\r\n P_new = np.linalg.solve(A,B)\r\n\r\n if indic != []:\r\n\r\n for element in indic:\r\n if element in wells_with_P:\r\n P_new = np.insert(P_new, 
element[1], wells_with_P[element])\r\n else:\r\n P_new = np.insert(P_new, element[1], frac_with_P[element])\r\n\r\n indic = []\r\n P_new = P_new.reshape(M, 1)\r\n\r\n\r\n P_total = np.vstack((P_total, P_new.T))\r\n\r\n P_total = np.delete(P_total, 0, axis=0)\r\n Pres_distrib = np.array(P_total.copy())\r\n\r\n\r\n return P_total\r\n\r\n#----------------------------------------------------------------------------\r\n\r\nif __name__ == '__main__':\r\n for t in range(T_exp):\r\n P_total = PorePressure_in_Time(alpha, beta, t_step, N, M, wells_with_Q, wells_with_P, frac_with_P, Pres, V, coeff_1, coeff_2, Pres_distrib)\r\n Pres_distrib = P_total\r\n\r\n X = np.zeros((N,M))\r\n Y = np.zeros((N, M))\r\n for m in range(M):\r\n for n in range(N):\r\n X[n][m] = n*hx\r\n Y[n][m] = m*hy\r\n\r\n X_list = [i for i in X.flat]\r\n Y_list = [j for j in Y.flat]\r\n P_list = [k for k in P_total.flat]\r\n\r\n\r\n CP_list = zip(X_list, Y_list, P_list)\r\n\r\nif __name__ == '__main__':\r\n print(min(P_list), max(P_list))\r\n\r\n xi = np.linspace(min(X_list),max(X_list), 700)\r\n yi = np.linspace(min(Y_list), max(Y_list), 700)\r\n xig, yig = np.meshgrid(xi, yi)\r\n Pi = interpolate.griddata((X_list,Y_list), P_list, (xig, yig), method='cubic')\r\n\r\n levels = list(range(0,2600000,50000))\r\n fig = plt.figure()\r\n surf = plt.contourf(xig, yig, Pi, cmap=cm.jet, antialiased=True, vmin=np.nanmin(Pi), vmax=np.nanmax(Pi),linewidth=0.2, levels=levels)\r\n\r\n t = np.arange(0, 2 * np.pi, 0.01)\r\n r = 0.215\r\n plt.plot(r * np.sin(t) + Lx/2, r * np.cos(t) + Ly/2)\r\n #ax = fig.gca(projection='3d')\r\n\r\n #surf = ax.plot_surface(xig, yig, Pi, cmap=cm.jet, antialiased=True, vmin=np.nanmin(Pi), vmax=np.nanmax(Pi), linewidth=0.2)\r\n fig.colorbar(surf, shrink=0.5, aspect=5)\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#","sub_path":"piezo_for_wells_and_frac_new_BC.py","file_name":"piezo_for_wells_and_frac_new_BC.py","file_ext":"py","file_size_in_byte":10572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
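A sketch of an early-exit steady-state check for the record's time loop above: stop once successive pressure fields agree to within a tolerance (the names follow the record's __main__ block; the 1 Pa tolerance is hypothetical):

P_prev = Pres_distrib.copy()
for t in range(T_exp):
    P_total = PorePressure_in_Time(alpha, beta, t_step, N, M, wells_with_Q,
                                   wells_with_P, frac_with_P, Pres, V,
                                   coeff_1, coeff_2, Pres_distrib)
    Pres_distrib = P_total
    if np.max(np.abs(P_total - P_prev)) < 1.0:  # Pa
        print('steady state reached at step', t)
        break
    P_prev = P_total.copy()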
+{"seq_id":"382775935","text":"#!/usr/bin/env python3\nimport connexion\nimport datetime\nimport logging\n\nfrom connexion import NoContent\n\n# our memory-only api storage\nAPIS = {}\n\n\ndef get_apis(limit, api_type=None):\n return [api for api in APIS.values() if not api_type or api['api_type'] == api_type][:limit]\n\n\ndef get_api(api_id):\n api = APIS.get(api_id)\n return api or ('Not found', 404)\n\n\ndef put_api(api_id, api):\n exists = api_id in APIS\n api['id'] = api_id\n if exists:\n logging.info('Updating api %s..', api_id)\n APIS[api_id].update(api)\n else:\n logging.info('Creating api %s..', api_id)\n api['created'] = datetime.datetime.utcnow()\n APIS[api_id] = api\n return NoContent, (200 if exists else 201)\n\n\ndef delete_api(api_id):\n if api_id in APIS:\n logging.info('Deleting api %s..', api_id)\n del APIS[api_id]\n return NoContent, 204\n else:\n return NoContent, 404\n\n\nlogging.basicConfig(level=logging.INFO)\napp = connexion.App(__name__)\napp.add_api('swagger.yaml')\n# set the WSGI application callable to allow using uWSGI:\n# uwsgi --http :8080 -w app\napplication = app.app\n\nif __name__ == '__main__':\n # run our standalone gevent server\n app.run(port=8080, server='gevent')","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
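The connexion record above keeps its whole API in a module-level dict, so the handlers can also be exercised directly. A minimal sketch (illustrative only; it assumes the definitions above are in scope, and in the running app connexion wires swagger.yaml operations to these functions):

# Exercising the in-memory CRUD handlers without HTTP.
body, status = put_api('pets', {'api_type': 'rest', 'name': 'Pets API'})  # status == 201 (created)
print(get_apis(limit=10, api_type='rest'))  # [{'id': 'pets', ...}]
print(delete_api('pets'))                   # (NoContent, 204)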
+{"seq_id":"251153729","text":"\"\"\"\n Following codebase is the re-implementation to conduct experiments for \n the paper titled 'Value Alignment Verification (http://proceedings.mlr.press/v139/brown21a/brown21a.pdf)'\n The cases of (explicit human, explicit robot) and (explicit human implicit robot) are implemented\n\"\"\"\n\n# Import the required libraries\nimport copy\nimport scipy\nimport random\nimport numpy as np \nimport math, os, sys\n\n# Class to create Discrete Environment for (explicit human, explicit robot), (explicit human, implicit robot) setting\nclass DiscreteEnvironment():\n\n\tdef __init__(self, env_tolerance = 1e-9, env_gamma = 0.9, env_dtype = np.float32):\n\n\t\t\"\"\"\n\t\t\tenv_tolerance: Tolerance value while computing the optimal value function in value iteration\n\t\t\tenv_gamma: Discount factor for value iteration\n\t\t\tenv_dtype: Data type used\n\t\t\"\"\"\n\n\t\tself.tolerance = env_tolerance\n\t\tself.gamma = env_gamma\n\t\tself.dtype = env_dtype\n\n\t# Function to print the policy of an agent\n\tdef print_policy(self, reverse=False):\n\t\tif self.policy is None:\n\t\t\tprint('Please initialize a policy OR run value iteration!')\n\t\t\treturn\n\t\t\n\t\tprint(\"\\nPrinting the Policy:\\n\")\n\t\tself.policy = dict(sorted(self.policy.items()))\n\t\tfor state, actions in list(self.policy.items()):\n\t\t\tprint_length = 0\n\t\t\tfor a in actions:\n\t\t\t\tif a is None:\n\t\t\t\t\tprint(self.action_to_text['None'], end = \"\")\n\t\t\t\t\tprint_length += 3\n\t\t\t\t\tcontinue\n\t\t\t\tprint(self.action_to_text[a], end=\"\")\n\t\t\t\tprint_length += len(self.action_to_text[a])\n\t\t\tif self.diagonal:\n\t\t\t\tprint(\" \" * (24 - print_length), end=\"\")\n\t\t\telse:\n\t\t\t\tprint(\" \" * (15 - print_length), end=\"\")\n\n\t\t\tif state[1] % self.size[1] == self.size[1] - 1:\n\t\t\t\tprint(\"\\n\")\n\n\t# Function to obtain the next state given the current state and action\n\tdef next_state(self, state, action):\n\n\t\tif action == None:\n\t\t\treturn state\n\n\t\tnext_state = tuple(np.array(state) + self.action_to_vec[action])\t\t\n\t\tnext_state = (int(next_state[0]), int(next_state[1]))\t\n\t\t\t\n\t\tif next_state[0] < 0 or next_state[0] >= self.size[0] or next_state[1] < 0 or next_state[1] >= self.size[1]:\n\t\t\treturn state\n\t\treturn next_state \t \n\t\n\t# Function for value iteration and obtaining the policy of an agent\n\tdef value_iteration(self):\n\t\tvalue_function = np.random.rand(self.size[0], self.size[1])\n\t\tvalue_function[self.terminal_state[0]][self.terminal_state[1]] = 0\n\t\tpolicy = {} \n\n\t\t# Value iteration algorithm\n\t\twhile True:\n\t\t\tdelta = 0\n\t\t\tfor row in range(self.size[0]):\n\t\t\t\tfor col in range(self.size[1]):\n\t\t\t\t\ts = (row, col)\n\t\t\t\t\tv = value_function[row][col]\n\t\t\t\t\tqvalue = np.zeros(self.num_actions)\n\t\t\t\t\tfor a in range(self.num_actions):\n\t\t\t\t\t\tprob = 1\n\t\t\t\t\t\tns = self.next_state(s, a)\n\t\t\t\t\t\tr = self.get_reward(s)\n\t\t\t\t\t\tqvalue[a] += prob * (r + self.gamma * value_function[ns[0]][ns[1]])\n\n\t\t\t\t\tvalue_function[row][col] = np.max(qvalue)\n\t\t\t\t\tdelta = max(delta, abs(v - value_function[row][col]))\n\t\t\t\t\t\t\n\t\t\tif delta < self.tolerance:\n\t\t\t\tbreak\n\t\t\n\t\t# Finding the optimal policy \n\t\tfor row in range(self.size[0]):\n\t\t\tfor col in range(self.size[1]):\n\t\t\t\ts = (row, col)\n\t\t\t\tif self.terminal_state != None:\n\t\t\t\t\tif s == self.terminal_state:\n\t\t\t\t\t\tpolicy[s] = 
[None]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\tqvalue = np.zeros(self.num_actions)\n\t\t\t\tfor a in range(self.num_actions):\n\t\t\t\t\tprob = 1\n\t\t\t\t\tns = self.next_state(s, a)\n\t\t\t\t\tr = self.get_reward(s)\n\t\t\t\t\tqvalue[a] += prob * (r + self.gamma * value_function[ns[0]][ns[1]])\n\t\t\t\t\t\n\t\t\t\topt_qvalue = np.max(qvalue)\n\t\t\t\tpolicy[s] = [x for x in np.where(qvalue == opt_qvalue)[0]]\n\t\t\t\t\t\n\t\treturn value_function, policy\n\n\t# Function to compute the Q value function matrix\n\tdef Q_value_function_matrix(self):\n\n\t\t# NOTE: It is sufficient to get Q values only for the optimal action \n\t\tQ_value_matrix = np.zeros((self.size[0], self.size[1]), dtype = self.dtype)\n\t\tvalue_function_ns = np.zeros((self.size[0], self.size[1]), dtype = self.dtype)\n\t\tfor row in range(self.size[0]):\n\t\t\tfor col in range(self.size[1]):\n\t\t\t\ts = (row, col)\n\t\t\t\topt_action = self.policy[s][0]\n\t\t\t\tprob = 1\n\t\t\t\tns = self.next_state(s, opt_action)\n\t\t\t\tr = self.get_reward(s)\n\t\t\t\tqvalue = prob * (r + self.gamma * self.value_function[ns[0]][ns[1]])\n\t\t\t\tQ_value_matrix[row][col] = qvalue\n\t\t\t\tvalue_function_ns[row][col] = self.value_function[ns[0]][ns[1]]\n\t\t\n\t\treturn Q_value_matrix, value_function_ns","sub_path":"vav-reproduce/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
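DiscreteEnvironment above leaves several attributes (size, num_actions, action_to_vec, terminal_state, get_reward) to be supplied elsewhere. A minimal sketch of a concrete subclass that would make value_iteration runnable; the grid layout, action set, and reward here are assumptions for illustration, not from the source:

import numpy as np

class ToyGrid(DiscreteEnvironment):
    # Hypothetical 3x3 grid with 4 moves and a +1 reward at the goal cell.
    def __init__(self):
        super().__init__()
        self.size = (3, 3)
        self.num_actions = 4
        # action index -> (row, col) displacement, as next_state() expects
        self.action_to_vec = {0: np.array([-1, 0]), 1: np.array([1, 0]),
                              2: np.array([0, -1]), 3: np.array([0, 1])}
        self.terminal_state = (2, 2)

    def get_reward(self, state):
        return 1.0 if state == self.terminal_state else 0.0

env = ToyGrid()
env.value_function, env.policy = env.value_iteration()
env.Q_value_function_matrix()  # well-defined now that policy and values are set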
+{"seq_id":"603608427","text":"from fabric import tasks\n\nimport repo\nimport fabric_tasks\nfrom os import path\n\ndef remote( Rino ):\n\t\"\"\"Manages remote repositories.\n\n\t\tusage: rino remote\n\t\t\tor: rino remote add <name> <url>\n\t\t\tor: rino remote remove <name>\n\t\t\tor: rino remote init <name>\n\t\"\"\"\n\n\tlogger = Rino.logger\n\targuments = Rino.arguments\n\n\tif \"add\" in arguments[\"_\"]:\n\t\treturn add_remote(arguments, logger)\n\telif \"remove\" in arguments[\"_\"]:\n\t\treturn remove_remote(arguments, logger)\n\telif \"init\" in arguments[\"_\"]:\n\t\treturn init_remote(arguments, logger)\n\telse:\n\t\treturn list_remote(arguments, logger)\n\n\ndef init_remote(arguments, logger):\n\tname = arguments[\"_\"][2]\n\n\tif name is None:\n\t\tlogger.log(\"remote name is not defined. Exiting\", color = \"red\")\n\t\treturn False\n\n\trinofile_json = repo.get_rinofile()\n\n\tif \"remote\" not in rinofile_json:\n\t\trinofile_json[\"remote\"] = []\n\n\tif not name in [x['name'] for x in rinofile_json[\"remote\"]]:\n\t\tlogger.log(\"remote <\" + name + \"> does not exist. Add first.\", color= \"red\")\n\t\treturn False\n\n\thost_full = [x['url'] for x in rinofile_json[\"remote\"] if x['name'] == name][0]\n\thost_url = host_full.split(':')[0]\n\tddir = host_full.split(':')[1]\n\n\ttasks.execute(fabric_tasks.make_remote_repo, ddir, hosts = [host_url])\n\n\ndef add_remote(arguments, logger):\n\tname = arguments[\"_\"][2]\n\turl = arguments[\"_\"][3]\n\n\tif name is None or url is None:\n\t\tlogger.log(\"remote name or url is not defined. Exiting\", color = \"red\")\n\t\treturn False\n\n\trinofile_json = repo.get_rinofile()\n\n\tif \"remote\" not in rinofile_json:\n\t\trinofile_json[\"remote\"] = []\n\n\tif name in [x['name'] for x in rinofile_json[\"remote\"]]:\n\t\tlogger.log(\"remote <\" + name + \"> already exists. Remove first.\", color= \"red\")\n\t\treturn False\n\n\trinofile_json[\"remote\"].append({\n\t\t\t\"name\" : name,\n\t\t\t\"url\" : url,\n\t\t})\n\n\tlogger.log(\"added remote repository \" + name + \" at \" + url)\n\trepo.write_rinofile(rinofile_json)\n\n\treturn True\n\n\ndef remove_remote(arguments, logger):\n\tname = arguments[\"_\"][2]\n\n\tif name is None:\n\t\tlogger.log(\"remote name is not defined. Exiting\", color = \"red\")\n\t\treturn False\n\n\trinofile_json = repo.get_rinofile()\n\n\tif \"remote\" not in rinofile_json:\n\t\tlogger.log(\"no remotes defined. Exiting\", color = \"red\")\n\t\treturn False\n\n\tif name in [x['name'] for x in rinofile_json[\"remote\"]]:\n\t\trinofile_json[\"remote\"] = [x for x in rinofile_json[\"remote\"] if x['name'] != name]\n\t\tlogger.log(\"removed remote repository \" + name)\n\telse:\n\t\tlogger.log(\"remote <\" + name + \"> does not exist.\", color= \"red\")\n\n\trepo.write_rinofile(rinofile_json)\n\n\treturn True\n\n\ndef list_remote(arguments, logger):\n\n\trinofile_json = repo.get_rinofile()\n\n\tif \"remote\" not in rinofile_json:\n\t\tlogger.log(\"no remotes defined. Exiting\", color = \"red\")\n\t\treturn False\n\n\telse:\n\t\tfor x in rinofile_json[\"remote\"]:\n\t\t\tlogger.log(x['name'] +\"\\t\"+ x['url'], noPrefix = True)","sub_path":"rino/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
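For reference, the "remote" block these helpers read and write inside the Rinofile would look roughly like this; the shape is inferred from the code above, and "origin" and the url are placeholder values:

# Hypothetical Rinofile contents, as a Python dict.
rinofile_json = {
    "remote": [
        {"name": "origin", "url": "user@host:/srv/repos/project"},
    ]
}
# init_remote splits the url on ':' into the host ("user@host")
# and the remote directory ("/srv/repos/project").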
+{"seq_id":"619182602","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# \n# #Linear Regression Portfolio!!!\n\n# This small linear regression project aims to apply concepts learned from the course \"Python for Data Science and Machine Learning Bootcamp\" with Jose Portilla. The exercise aims to provide a data-based decision on where a company should apply its efforts in order to raise clients' Yearly Amount Spent: by investing in Time on App or Time on Website. Data visualisation is made with the seaborn library, data work with the pandas library, math with numpy and machine learning with sklearn. Features were evaluated and shown. The dataset was split in two, so there could be a train sample and a test one. The predictions made were evaluated with error metrics (MAE, MSE, RMSE). The coefficients showed that Time on App had a greater influence on Yearly Amount Spent. This could lead to the conclusion that the company should invest in keeping its clients on the app for a greater time, but it also shows that there is room for sales growth through the website.\n\n# In[1]:\n\n\nprint(\"Exercise examples made by Thiago I. Klock as content for portfolio.\")\n\n\n# In[30]:\n\n\nimport pandas as pd #For DataFrame manipulation.\nimport numpy as np #For Math.\nimport matplotlib.pyplot as plt # For plotting.\nimport seaborn as sns #For nice plotting.\n\n\n# In[15]:\n\n\nprint('Reading data base: Ecommerce_Customers_DataBase.')\ncustomers = pd.read_csv(\"Ecommerce_Customers_DataBase\")\n\n\n# In[16]:\n\n\nprint('Reading Data Base head.')\ncustomers.head(5)\n\n\n# In[17]:\n\n\nprint('Describing data basic statistics.')\ncustomers.describe()\n\n\n# In[18]:\n\n\nprint('Checking null values in data base.')\ncustomers.info()\nprint('No null values found.')\n\n\n# In[59]:\n\n\nsns.set_palette(\"BrBG\")\nsns.set_style('white')\n\n\n# In[62]:\n\n\n# More time on site, more money spent.\nsns.jointplot(x='Time on Website',y='Yearly Amount Spent',data=customers,kind='kde')\n\n\n# In[63]:\n\n\nsns.jointplot(y='Time on App',x='Length of Membership',kind='kde',data=customers)\n\n\n# In[65]:\n\n\nsns.pairplot(customers, kind =\"reg\")\n\n\n# \n# The above image shows graphs in which a linear relation is clear.\n# It can be seen how the features relate to one another.\n# The pair Yearly Amount Spent vs. Length of Membership stands out, so it should be looked at more closely.\n\n# In[66]:\n\n\nsns.lmplot(x='Length of Membership',y='Yearly Amount Spent',data=customers)\n\n\n# The direct relation between Length of Membership and Yearly Amount Spent is clear.\n\n# Training and Testing Data\n# \n# With the data explored and displayed, the next step should be Training and Testing the Data.\n\n# In[134]:\n\n\ny = customers['Yearly Amount Spent'] # picking just the \"target data\"\n\n\n# In[135]:\n\n\nX = customers[['Avg. Session Length', 'Time on App','Time on Website', 'Length of Membership']] #picking the features data. Sticking just with the numeric ones.\n\n\n# ** Use model_selection.train_test_split from sklearn to split the data into training and testing sets. 
Set test_size=0.25 and random_state=42**\n\n# In[79]:\n\n\nfrom sklearn.model_selection import train_test_split #Importing the splitting function\n\n\n# In[81]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)\n\n\n# Training the Model with sklearn Linear Regression\n# \n# \n\n# In[82]:\n\n\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[83]:\n\n\nlm = LinearRegression()\n\n\n# In[84]:\n\n\nlm.fit(X_train,y_train) #Fitting the linear regression\n\n\n# **Print out the coefficients of the model**\n\n# In[90]:\n\n\n\nprint('Coefficients: \\n', lm.coef_)\nprint('Those are the coefficients for the linear regression model.')\n\n\n# Predicting\n# Now that we have fit our model, let's evaluate its performance by predicting off the test values!\n\n# In[92]:\n\n\npredictions = lm.predict( X_test)\n\n\n# In[95]:\n\n\nplt.scatter(y_test,predictions)\nplt.xlabel('Y Test')\nplt.ylabel('Predicted Y')\n\n\n# Evaluating the Model\n# \n# The Model's performance can be evaluated by calculating the regression error metrics: MAE, MSE and RMSE.\n\n# In[97]:\n\n\n# calculate these metrics with sklearn\nfrom sklearn import metrics\n\nprint('MAE:', metrics.mean_absolute_error(y_test, predictions))\nprint('MSE:', metrics.mean_squared_error(y_test, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))\n\n\n# Conclusion:\n# In order to answer the original question of where the company should focus, \"mobile app\" or \"website development\", it is necessary to consider the table below, which shows the weight each feature has on the Yearly Amount Spent, as provided by the linear regression model.\n\n# In[132]:\n\n\ncoeffs = pd.DataFrame(lm.coef_,X.columns)\ncoeffs.columns = ['Coefficient']\nprint(coeffs)\n\n\n# Interpreting the coefficients:\n# \n# - For each unit increase in **Avg. Session Length**, an increase of 25.98 total dollars spent happens.\n# - For each unit increase in **Time on App**, an increase of 38.59 total dollars spent happens.\n# - For each unit increase in **Time on Website**, an increase of 0.19 total dollars spent happens.\n# - For each unit increase in **Length of Membership**, an increase of 61.27 total dollars spent happens.\n# \n# This assumes the other 3 features are kept equal.\n\n# Where should the company invest? App or Website?\n\n# \n# Based on the numbers shown, it is possible to affirm that Time on App has a greater impact on client expenditure, suggesting that keeping the client on the app for longer periods might give a greater return. The return for Time on Website is low, though, which indicates there is much room for growth in that channel, i.e. low investments could yield greater returns. Benchmarking could provide data for this decision.\n","sub_path":"Ecommerce_Exercise_NY_Clothing_Selling_Online.py","file_name":"Ecommerce_Exercise_NY_Clothing_Selling_Online.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
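As a sanity check on the coefficient interpretation in the record above, the per-feature effect can be read straight off the fitted model. A sketch, assuming X and lm from the notebook are in scope; the quoted values come from the text and will vary with the train/test split:

# Predicted change in Yearly Amount Spent for a one-unit increase in a feature,
# holding the other features fixed.
deltas = dict(zip(X.columns, lm.coef_))
print(deltas['Time on App'])      # ~38.59 dollars per extra unit of app time
print(deltas['Time on Website'])  # ~0.19 dollars: a far smaller effect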
+{"seq_id":"512013585","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-unsafe\n\nimport logging\nfrom typing import List, Dict\n\nimport pandas as pd\nfrom kats.models.metalearner.metalearner_modelselect import MetaLearnModelSelect\n\nNUM_SECS_IN_DAY = 3600 * 24\nPARAMS_TO_SCALE_DOWN = {\"n_control\", \"n_test\", \"historical_window\", \"scan_window\"}\n\n\ndef change_dtype(d):\n for elm in d:\n d[elm] = float(d[elm])\n return d\n\n\nclass MetaDetectModelSelect(object):\n def __init__(self, df: pd.DataFrame) -> None:\n if not isinstance(df, pd.DataFrame):\n msg = \"Dataset is not in form of a dataframe!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if len(df) <= 30:\n msg = \"Dataset is too small to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"hpt_res\" not in df:\n msg = \"Missing best hyper-params, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"features\" not in df:\n msg = \"Missing features, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"best_model\" not in df:\n msg = \"Missing best models, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n self.df = df\n self.results = None\n\n def preprocess(self) -> List:\n # prepare the training data\n # Create training data table\n table = [\n {\n \"hpt_res\": self.df[\"hpt_res\"][i],\n \"features\": self.df[\"features\"][i],\n \"best_model\": self.df[\"best_model\"][i],\n }\n for i in range(len(self.df))\n ]\n\n # Change dtype of TSFeatures for compatibility\n for t in table:\n t[\"features\"] = change_dtype(t[\"features\"])\n\n # Scaling down certain params by num_secs_in_day to make models easier to converge\n for ts_data in table:\n for hpt_vals in ts_data[\"hpt_res\"].values():\n params = hpt_vals[0]\n for param in params.keys():\n if param in PARAMS_TO_SCALE_DOWN:\n params[param] = params[param] / NUM_SECS_IN_DAY\n return table\n\n def train(self) -> Dict[str, Dict[str, float]]:\n # call the train() method of MetaLearnModelSelect\n mlms = MetaLearnModelSelect(self.preprocess())\n self.results = mlms.train()\n return self.results\n\n def report_metrics(self) -> pd.DataFrame:\n # report the summary, as in the notebook N1154788\n if self.results is None:\n self.results = self.train()\n summary = pd.DataFrame([self.results[\"fit_error\"], self.results[\"pred_error\"]])\n summary[\"type\"] = [\"fit_error\", \"pred_error\"]\n summary[\"error_metric\"] = \"Inverted F-score\"\n return summary\n\n def predict(self, TimeSeriesData):\n # for a given timeseries data, predicts the best model\n # this can be omitted, for the bootcamp task (add later)\n raise ValueError(\"Predict method hasn't been implemented yet.\")\n","sub_path":"kats/detectors/meta_learning/metalearning_detection_model.py","file_name":"metalearning_detection_model.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"217427690","text":"from typing import Tuple\nfrom typing import List\nfrom typing import Any\nfrom typing import Optional\n\nfrom matplotlib.patches import FancyArrowPatch\nfrom matplotlib.patches import ArrowStyle\nfrom compas.geometry import Point, Vector\n\nfrom compas.artists import PrimitiveArtist\nfrom .artist import PlotterArtist\n\nColor = Tuple[float, float, float]\n\n\nclass VectorArtist(PlotterArtist, PrimitiveArtist):\n \"\"\"Artist for COMPAS vectors.\n\n Parameters\n ----------\n vector : :class:`~compas.geometry.Vector`\n A COMPAS vector.\n point : :class:`~compas.geometry.Point`, optional\n A COMPAS point as base point for the vector.\n Default is the origin of the world coordinate system.\n draw_point : bool, optional\n If True, draw the point of application of the vector.\n color : tuple[float, float, float], optional\n Color of the vector.\n zorder : int, optional\n Stacking order of the vector on the canvas.\n **kwargs : dict, optional\n Additional keyword arguments.\n See :class:`~compas_plotters.artists.PlotterArtist` and :class:`~compas.artists.PrimitiveArtist` for more info.\n\n Attributes\n ----------\n vector : :class:`~compas.geometry.Vector`\n The vector associated with the artist.\n\n \"\"\"\n\n def __init__(\n self,\n vector: Vector,\n point: Optional[Point] = None,\n draw_point: bool = False,\n color: Color = (0, 0, 0),\n zorder: int = 3000,\n **kwargs: Any\n ):\n\n super().__init__(primitive=vector, **kwargs)\n\n self._mpl_vector = None\n self._point_artist = None\n self.draw_point = draw_point\n self.point = point or Point(0.0, 0.0, 0.0)\n self.color = color\n self.zorder = zorder\n\n @property\n def vector(self):\n return self.primitive\n\n @vector.setter\n def vector(self, vector):\n self.primitive = vector\n\n @property\n def data(self) -> List[List[float]]:\n return [self.point[:2], (self.point + self.vector)[:2]]\n\n def draw(self) -> None:\n \"\"\"Draw the vector.\n\n Returns\n -------\n None\n\n \"\"\"\n style = ArrowStyle(\"Simple, head_length=0.1, head_width=0.1, tail_width=0.02\")\n arrow = FancyArrowPatch(\n self.point[:2],\n (self.point + self.vector)[:2],\n arrowstyle=style,\n edgecolor=self.color,\n facecolor=self.color,\n zorder=self.zorder,\n mutation_scale=100,\n )\n if self.draw_point:\n self._point_artist = self.plotter.add(self.point, edgecolor=self.color)\n self._mpl_vector = self.plotter.axes.add_patch(arrow)\n\n def redraw(self):\n \"\"\"Update the vector using the current geometry and visualization settings.\n\n Returns\n -------\n None\n\n \"\"\"\n self._mpl_vector.set_positions(self.point[:2], (self.point + self.vector)[:2])\n if self.draw_point:\n self._point_artist.redraw()\n","sub_path":"src/compas_plotters/artists/vectorartist.py","file_name":"vectorartist.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"63875042","text":"import json\nfrom functools import reduce  # needed in Python 3, where reduce lives in functools\n\n# this is the location of your data; has to be downloaded from http://cogcomp.org/multirc/\ninputFile = '/Users/daniel/ideaProjects/hard-qa/split/dev_83.json'\n\n\ndef main():\n    evaluate('baseline-scores/human-01.json')\n\n\ndef avg(l):\n    return reduce(lambda x, y: x + y, l) / len(l)\n\n\ndef evaluate(outFile):\n    input = json.load(open(inputFile))\n    output = json.load(open(outFile))\n    outputMap = dict([[a[\"pid\"] + \"==\" + a[\"qid\"], a[\"scores\"]] for a in output])\n\n    assert len(outputMap) == len(output), \"You probably have redundancies in your keys\"\n\n    P1 = []\n    R1 = []\n\n    for p in input[\"data\"]:\n        for qIdx, q in enumerate(p[\"paragraph\"][\"questions\"]):\n            id = p[\"id\"] + \"==\" + str(qIdx)\n            if (id in outputMap):\n                predictedAns = outputMap.get(id)\n                correctAns = [int(a[\"isAnswer\"]) for a in q[\"answers\"]]\n                predictCount = sum(predictedAns)\n                correctCount = sum(correctAns)\n                agreementCount = sum([a * b for (a, b) in zip(correctAns, predictedAns)])\n                p1 = (1.0 * agreementCount / correctCount) if correctCount > 0.0 else 1.0\n                r1 = (1.0 * agreementCount / predictCount) if predictCount > 0.0 else 1.0\n                P1.append(p1)\n                R1.append(r1)\n            else:\n                print(\"The id \" + id + \" not found . . . \")\n\n    print(\"Per question measures (i.e. precision-recall per question, then average) \")\n    print(\"\\tP: \" + str(avg(P1)) + \" - R: \" + str(avg(R1)) + \" - F1m: \" + str(2 * avg(R1) * avg(P1) / (avg(P1) + avg(R1))))\n\n    agreementCount = 0\n    correctCount = 0\n    predictCount = 0\n    for p in input[\"data\"]:\n        for qIdx, q in enumerate(p[\"paragraph\"][\"questions\"]):\n            id = p[\"id\"] + \"==\" + str(qIdx)\n            if (id in outputMap):\n                predictedAns = outputMap.get(id)\n                correctAns = [int(a[\"isAnswer\"]) for a in q[\"answers\"]]\n                predictCount += sum(predictedAns)\n                correctCount += sum(correctAns)\n                agreementCount += sum([a * b for (a, b) in zip(correctAns, predictedAns)])\n            else:\n                print(\"The id \" + id + \" not found . . . \")\n\n    p1 = (agreementCount / correctCount) if correctCount > 0.0 else 1.0\n    r1 = (agreementCount / predictCount) if predictCount > 0.0 else 1.0\n    P1.append(p1)\n    R1.append(r1)\n\n    print(\"Dataset-wide measures (i.e. precision-recall across all the candidate-answers in the dataset) \")\n    print(\"\\tP: \" + str(avg(P1)) + \" - R: \" + str(avg(R1)) + \" - F1a: \" + str(2 * avg(R1) * avg(P1) / (avg(P1) + avg(R1))))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"eval/multirc-eval-v1.py","file_name":"multirc-eval-v1.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
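For a single question, the per-question measures in the script above reduce to simple vector arithmetic. A worked toy example (made-up labels, not from the dataset; it mirrors the script's naming, where "P" divides by the gold count and "R" by the predicted count):

correctAns = [1, 0, 1, 0]    # gold: answers 0 and 2 are correct
predictedAns = [1, 1, 0, 0]  # system marked answers 0 and 1 as correct
agreement = sum(a * b for a, b in zip(correctAns, predictedAns))  # = 1
p1 = agreement / sum(correctAns)    # 0.5 -- the script's per-question "P"
r1 = agreement / sum(predictedAns)  # 0.5 -- the script's per-question "R"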
+{"seq_id":"517858477","text":"from django.forms import ModelForm\nfrom .models import *\nfrom django.shortcuts import render, redirect, get_object_or_404\nimport datetime\nfrom .bo import calculadora\n\n\n# Create your views here.\n\ndef home(request, template_name='home_.html'):\n vnow = datetime.datetime.now()\n dnow = {'vnow': vnow}\n # html = \"It is now %s.\" %now\n # return HttpResponse(html)\n return render(request, template_name, dnow)\n\n\nclass LivroForm(ModelForm):\n class Meta:\n model = Livro\n fields = ['autor', 'editora', 'isbn', 'numeroPaginas', 'titulo', 'anoPublicacao', 'emailEditora', 'dataCadastro']\n\n\ndef livro_list(request, template_name='livro_list.html'):\n livro = Livro.objects.all()\n search = request.GET.get('search')\n if search:\n livro = livro.filter(titulo__icontains=search)\n livros = {'lista': livro}\n return render(request, template_name, livros)\n\n# def livro_busca(request, template_name='livro_list.html'):\n # livro = Livro.objects.filter(pk=1)\n # if request.method == \"POST\":\n # form\n # vbusca = {'lista': livro}\n # return render(request, template_name, vbusca)\n\n\ndef livro_new(request, template_name='livro_form.html'):\n form = LivroForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('livro_list')\n return render(request, template_name, {'form': form})\n\n\ndef livro_edit(request, pk, template_name='livro_form.html'):\n livro = get_object_or_404(Livro, pk=pk)\n if request.method == \"POST\":\n form = LivroForm(request.POST, instance=livro)\n if form.is_valid():\n livro = form.save()\n return redirect('livro_list')\n else:\n form = LivroForm(instance=livro)\n return render(request, template_name, {'form': form})\n\n\ndef livro_remove(request, pk):\n livro = Livro.objects.get(pk=pk)\n if request.method == \"POST\":\n livro.delete()\n return redirect('livro_list')\n return render(request, 'livro_delete.html', {'livro': livro})\n\n\ndef vbo(request):\n res2 = calculadora.somar(3, 5)\n fig = calculadora.grafico()\n # html = calculadora.plotly()\n dnow = {'vnow': fig, 'vnow2': fig}\n return render(request, 'home_.html', dnow)\n","sub_path":"djangoProjeto/projBiblioteca/biblioteca/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"298112976","text":"import random\n\ndef jogar():\n    print(\"Welcome to Hangman!\")\n\n    arquivo = open(\"palavras.txt\",\"r\")\n    palavras = []\n\n    for linha in arquivo:\n        palavras.append(linha.strip())\n    arquivo.close()\n\n    numero = random.randrange(0,len(palavras))\n\n    palavra_objetivo = palavras[numero]\n    print(palavra_objetivo)\n\n    letras_encontradas = [\"_\" for letra in palavra_objetivo]\n    print(letras_encontradas)\n    print(\"Now our IDE is hooked up directly to the project\")\n\n    print(\"END GAME\")\n\nif(__name__ == \"__main__\"):\n    jogar()","sub_path":"forca.py","file_name":"forca.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"152374257","text":"import panel as pn\nimport dask_cudf\n\nfrom .core_non_aggregate import BaseNonAggregate\nfrom ....layouts import chart_view\n\n\nclass BaseLine(BaseNonAggregate):\n    stride = 0.0\n    reset_event = None\n    filter_widget = None\n    no_color_set = False\n\n    def __init__(\n        self,\n        x,\n        y=None,\n        data_points=100,\n        add_interaction=True,\n        pixel_shade_type=\"linear\",\n        color=None,\n        step_size=None,\n        step_size_type=int,\n        width=800,\n        height=400,\n        title=\"\",\n        timeout=100,\n        **library_specific_params,\n    ):\n        \"\"\"\n        Description:\n\n        -------------------------------------------\n        Input:\n        x\n        y\n        data_points\n        add_interaction\n        aggregate_fn\n        width\n        height\n        step_size\n        step_size_type\n        x_label_map\n        y_label_map\n        width\n        height\n        title\n        timeout\n        **library_specific_params\n        -------------------------------------------\n\n        Output:\n\n        \"\"\"\n        self.x = x\n        self.y = y\n        self.data_points = data_points\n        self.add_interaction = add_interaction\n        if color is None:\n            self.color = \"#8735fb\"\n            self.no_color_set = True\n        else:\n            self.color = color\n\n        self.stride = step_size\n        self.stride_type = step_size_type\n        self.pixel_shade_type = pixel_shade_type\n        self.title = title\n        self.timeout = timeout\n        self.library_specific_params = library_specific_params\n        self.width = width\n        self.height = height\n\n    def initiate_chart(self, dashboard_cls):\n        \"\"\"\n        Description:\n\n        -------------------------------------------\n        Input:\n            data: cudf DataFrame\n        -------------------------------------------\n\n        Output:\n\n        \"\"\"\n        if type(dashboard_cls._cuxfilter_df.data) == dask_cudf.core.DataFrame:\n            self.min_value = (\n                dashboard_cls._cuxfilter_df.data[self.x].min().compute()\n            )\n            self.max_value = (\n                dashboard_cls._cuxfilter_df.data[self.x].max().compute()\n            )\n        else:\n            self.min_value = dashboard_cls._cuxfilter_df.data[self.x].min()\n            self.max_value = dashboard_cls._cuxfilter_df.data[self.x].max()\n\n        if self.data_points > len(dashboard_cls._cuxfilter_df.data):\n            self.data_points = len(dashboard_cls._cuxfilter_df.data)\n\n        if self.stride is None:\n            if self.max_value < 1 and self.stride_type == int:\n                self.stride_type = float\n            if self.stride_type == int:\n                self.stride = int(\n                    round((self.max_value - self.min_value) / self.data_points)\n                )\n            else:\n                self.stride = float(\n                    (self.max_value - self.min_value) / self.data_points\n                )\n\n        self.calculate_source(dashboard_cls._cuxfilter_df.data)\n        self.generate_chart()\n        self.apply_mappers()\n\n        if self.add_interaction:\n            self.add_range_slider_filter(dashboard_cls)\n        self.add_events(dashboard_cls)\n\n    def view(self):\n        return chart_view(self.chart, self.filter_widget, width=self.width)\n\n    def add_range_slider_filter(self, dashboard_cls):\n        \"\"\"\n        Description: add a range slider to the bottom of the chart;\n        its filter callback facilitates interaction behavior that\n        updates the rest of the charts on the page using datatiles\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        \"\"\"\n        if self.stride is None:\n            self.stride = self.stride_type(\n                (self.max_value - self.min_value) / self.data_points\n            )\n\n        self.filter_widget = pn.widgets.RangeSlider(\n            start=self.min_value,\n            end=self.max_value,\n            value=(self.min_value, self.max_value),\n            step=self.stride,\n            **{\"width\": self.width},\n            sizing_mode=\"scale_width\",\n        )\n\n        def filter_widget_callback(event):\n            if dashboard_cls._active_view != self.name:\n                dashboard_cls._reset_current_view(new_active_view=self)\n                
dashboard_cls._calc_data_tiles()\n\n            dashboard_cls._query_datatiles_by_range(event.new)\n\n        # add callback to filter_widget on value change\n        self.filter_widget.param.watch(\n            filter_widget_callback, [\"value\"], onlychanged=False\n        )\n\n    def compute_query_dict(self, query_str_dict):\n        \"\"\"\n        Description:\n\n        -------------------------------------------\n        Input:\n            query_dict = reference to dashboard.__cls__.query_dict\n        -------------------------------------------\n\n        Output:\n        \"\"\"\n        if self.filter_widget.value != (\n            self.filter_widget.start,\n            self.filter_widget.end,\n        ):\n            min_temp, max_temp = self.filter_widget.value\n            query_str_dict[self.name] = (\n                str(min_temp) + \"<=\" + str(self.x) + \"<=\" + str(max_temp)\n            )\n        else:\n            query_str_dict.pop(self.name, None)\n\n    def add_events(self, dashboard_cls):\n        \"\"\"\n        Description:\n\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        \"\"\"\n        if self.reset_event is not None:\n            self.add_reset_event(dashboard_cls)\n\n    def add_reset_event(self, dashboard_cls):\n        \"\"\"\n        Description:\n\n        -------------------------------------------\n        Input:\n\n        -------------------------------------------\n\n        Output:\n        \"\"\"\n\n        def reset_callback(event):\n            self.filter_widget.value = (\n                self.filter_widget.start,\n                self.filter_widget.end,\n            )\n\n        # add callback to reset chart button\n        self.add_event(self.reset_event, reset_callback)\n","sub_path":"python/cuxfilter/charts/core/non_aggregate/core_line.py","file_name":"core_line.py","file_ext":"py","file_size_in_byte":6236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"154903197","text":"from enum import Enum\nfrom faker import Faker\nfrom faker_extensions.abstract_providers import WeightedProvider\nfrom faker_extensions.common_categories import BaseEnum\n\n\nclass Faith(BaseEnum):\n CHRISTIAN = 1\n BUDDHIST = 2\n HINDU = 3\n JEWISH = 4\n MUSLIM = 5\n SIKH = 6\n OTHER = 7\n NONE = 8\n NOT_STATED = 9\n\n\nclass FaithProvider(WeightedProvider):\n \"\"\" Faith distribution in the uk \"\"\"\n faith_distributions = {\n Faith.CHRISTIAN: 0.467,\n Faith.BUDDHIST: 0.005,\n Faith.HINDU: 0.018,\n Faith.JEWISH: 0.005,\n Faith.MUSLIM: 0.057,\n Faith.SIKH: 0.007,\n Faith.OTHER: 0.017,\n Faith.NONE: 0.421\n }\n\n def __init__(self, generator):\n super().__init__(self.faith_distributions, generator)\n\n def faith(self):\n return self.get_choice()\n\n\ndef main():\n fake = Faker(['en_UK'])\n fake.add_provider(FaithProvider(fake))\n\n faith = fake.faith()\n print(faith)\n print(faith.name.title())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"source/faker_extensions/faith_provider.py","file_name":"faith_provider.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"184502476","text":"# Time Complexity : O(n)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n buy, sell = float('inf'), 0\n for price in prices:\n buy = min(price, buy)\n sell = max(sell, price-buy) \n return sell","sub_path":"121_Best_Time_to_Buy_and_Sell_Stock.py","file_name":"121_Best_Time_to_Buy_and_Sell_Stock.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
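A quick check of the single-pass buy/sell logic above (illustrative; the List annotation assumes `from typing import List` or the LeetCode harness):

prices = [7, 1, 5, 3, 6, 4]
print(Solution().maxProfit(prices))  # 5: buy at 1, sell at 6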
+{"seq_id":"579431004","text":"from tabulate import tabulate\n\n # sort by expected value (ascending)\ndef sortByME(outData):\n    return sorted(outData, key=lambda data: data[0])\n\n\n # extract the positions from the arrays and render them as a string\ndef viewPositions(arrayPosit):\n    strOr = ''\n    posit = ''\n    for arr1 in arrayPosit:\n        posit += strOr\n        for el in arr1:\n            posit += str(el)+', '\n        posit = posit[:len(posit)-2]\n        strOr = '\\nor\\n'\n    return posit\n\n\n # print the results as a table\ndef transmitData(outData):\n    columnNames = ['Average number of\\ndisconnected consumers',\n                   'Number of\\nswitches (KA) to install',\n                   'Installation locations']\n\n    arrayOutData = []\n    for nKA in sortByME(outData):\n        arrayOutData.append((round(nKA[0], 2), nKA[1], viewPositions(nKA[2])))\n\n    print()\n    print(tabulate(arrayOutData, headers=columnNames,tablefmt='grid',colalign=(\"center\", \"center\", \"center\")))","sub_path":"outputModule.py","file_name":"outputModule.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
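The helpers above expect each result as a tuple of (expected value, switch count, list of alternative position arrays); the shape is easiest to see with a small illustrative call (made-up numbers):

# Each entry: (mean disconnected consumers, number of switches, candidate positions).
sample = [
    (12.5, 2, [[3, 7], [4, 8]]),  # two alternative placements, joined with "or"
    (9.75, 3, [[1, 5, 9]]),
]
transmitData(sample)  # prints rows sorted by the first element (ascending)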
+{"seq_id":"211083077","text":"import sys\nfrom django.core.management.base import BaseCommand\nfrom tickers.models import *\nfrom tickers.utils import * # get_time_ago, get_day_trading_of_mcap_percent\nimport humanize\n\nclass Command(BaseCommand):\n MINIMUM_READINGS_TO_PROCESS_TICKER_AS_INTERESTING_TO_WATCH = 100\n help = 'Find ticker with increasing/decreasing volume patterns.'\n l_tokens_perfect = []\n ONLY_PERFECT_IF_AVG_VOLUME_TO_MCAP_PERCENT_ABOVE_X = 4\n\n def add_arguments(self, parser):\n #parser.add_argument('-w', '--workers', type=int, default=1, help='number of workers.')\n parser.add_argument('-s', '--symbol', type=str, default=None, help='Specific symbol name')\n parser.add_argument('-t', '--alerttp', type=int, default=10, help='Alert when 24 volume / mcap percent above')\n parser.add_argument('-r', '--alertrrp', type=int, default=10, help='Alert rank rise percent')\n parser.add_argument('-mr', '--minreads', type=int, default=self.MINIMUM_READINGS_TO_PROCESS_TICKER_AS_INTERESTING_TO_WATCH, help='minimum readings to even start analyze a coin')\n #parser.add_argument('--workers-timeout', type=int)\n\n def handle(self, *args, **options):\n symbol = options['symbol']\n self.alert_trading_volume_percent_th = int(options['alerttp'])\n self.alert_rank_rise_percent_th = int(options['alertrrp'])\n self.minimum_readings_to_analyze_coin = int(options['minreads'])\n self.i_alert_rise_in_rank_count=0\n\n print(\"Started with symbol: %s\" % (symbol))\n print(\"Started with alert_trading_volume_percent_th: %s\" % (self.alert_trading_volume_percent_th))\n print(\"Started with alert_rank_rise_percent_th: %s\" % (self.alert_rank_rise_percent_th))\n if symbol:\n symbol = symbol.upper()\n\n print(\"Started with %s\" % symbol if symbol else \"no specific symbol (will go over all of them)\")\n\n if symbol:\n rs_which_coins = None\n rs = TickerHistory.objects.filter(symbol=symbol).order_by('-lastUpdated')\n else:\n rs_which_coins = Ticker.objects.all().order_by('-rank')\n\n if rs_which_coins:\n\n for rec_coin in rs_which_coins:\n rs = TickerHistory.objects.filter(symbol=rec_coin.symbol).order_by('-lastUpdated')\n if len(rs)>self.minimum_readings_to_analyze_coin:\n self.print_ticker_history_rs_data(rs)\n else:\n print(\"== Skipping %s with %s readings in total (min: %s)\\r\\n\" % (rec_coin.symbol, len(rs), self.minimum_readings_to_analyze_coin))\n else:\n self.print_ticker_history_rs_data(rs)\n\n #\n #\n #\n print(\"There are %d Perfect tokens - %s\" % (len(self.l_tokens_perfect), \",\".join(self.l_tokens_perfect) ))\n\n def print_ticker_history_rs_data(self, rs_TickerHistory):\n rs = rs_TickerHistory\n if rs:\n which_symbol = None\n rank_seen = None\n mcap_seen = None\n value_btc_seen = None\n trading24tomcap = None\n s_prev_displayed_percent_reading_in_period = None\n fl_coin_latest_base_btc_value = None\n\n sum_24h_trading_volume_to_mcad = 0\n count_24h_trading_volume_to_mcad = 0\n\n count_available_ticker_readings = len(rs)\n SHOW_X_TICKER_READINGS = 10\n\n print_reading_modulo = int(count_available_ticker_readings / SHOW_X_TICKER_READINGS)\n print(\"count_available_ticker_readings: %s\" % count_available_ticker_readings)\n print(\"print_reading_modulo: %s\" % print_reading_modulo)\n\n flt_max_24h_trading_volume_to_mcad_seen = None\n #\n print(\"=======================\\r\\n\")\n for indx_of_available_reading, reading in enumerate(rs):\n s_percent = get_day_trading_of_mcap_percent_for_obj(obj=reading)\n if s_percent != None:\n fl_percent_24h_trading_volume_to_mcad = float(s_percent.replace('%', 
''))\n else:\n fl_percent_24h_trading_volume_to_mcad = None\n\n current_available_reading_percent_in_available_period = int((indx_of_available_reading / count_available_ticker_readings) * 100)\n if current_available_reading_percent_in_available_period % SHOW_X_TICKER_READINGS == 0 or indx_of_available_reading+1 == len(rs):\n if indx_of_available_reading+1 == len(rs):\n s_displayed_percent_reading_in_period = 100\n else:\n s_displayed_percent_reading_in_period = current_available_reading_percent_in_available_period\n\n # Print ticker if last ticker read (oldest one) or if we reached far enough from previous printed ticker\n if (indx_of_available_reading % print_reading_modulo == 0 and s_displayed_percent_reading_in_period != s_prev_displayed_percent_reading_in_period) or indx_of_available_reading+1 == len(rs):\n if fl_coin_latest_base_btc_value != None:\n percent_change_from_latest_btc_price = int(((fl_coin_latest_base_btc_value-reading.priceBtc) / reading.priceBtc)*100)\n s_change_from_base_btc_value = \" : %d%% %s within last %s days\" % (abs(percent_change_from_latest_btc_price), \"gain\" if percent_change_from_latest_btc_price >= 0 else \"loss\", (coin_latest_base_last_updated-reading.lastUpdated).days)\n else:\n s_change_from_base_btc_value = \"\"\n\n print(\"%s%% - %s symbol ticker was read %s, rank #%s, value %s BTC (%s%% daily change) with %s trading percent (MCAP: %s)%s\" % \\\n (s_displayed_percent_reading_in_period, reading.symbol, get_time_ago(reading.lastUpdated), reading.rank, reading.priceBtc, reading.percentChange24h, s_percent, format_using_humanize(reading.marketCapUsd, humanize.intword),\n s_change_from_base_btc_value) \\\n )\n if s_prev_displayed_percent_reading_in_period == None: # save base\n fl_coin_latest_base_btc_value = reading.priceBtc\n coin_latest_base_last_updated = reading.lastUpdated\n\n s_prev_displayed_percent_reading_in_period = s_displayed_percent_reading_in_period\n\n if fl_percent_24h_trading_volume_to_mcad != None:\n sum_24h_trading_volume_to_mcad += fl_percent_24h_trading_volume_to_mcad\n count_24h_trading_volume_to_mcad += 1\n\n if flt_max_24h_trading_volume_to_mcad_seen == None or flt_max_24h_trading_volume_to_mcad_seen < fl_percent_24h_trading_volume_to_mcad:\n flt_max_24h_trading_volume_to_mcad_seen = fl_percent_24h_trading_volume_to_mcad\n\n if not which_symbol:\n which_symbol = reading.symbol\n # rank\n if not rank_seen or reading.rank > rank_seen[1] or reading.rank < rank_seen[0]:\n if not rank_seen:\n rank_seen = [reading.rank , reading.rank]\n else:\n if reading.rank > rank_seen[1]:\n rank_seen[1] = reading.rank\n\n if reading.rank < rank_seen[0]:\n rank_seen[0] = reading.rank\n # value\n if not value_btc_seen or reading.priceBtc > value_btc_seen[1] or reading.priceBtc < value_btc_seen[0]:\n if not value_btc_seen:\n value_btc_seen = [reading.priceBtc , reading.priceBtc]\n else:\n if reading.priceBtc > value_btc_seen[1]:\n value_btc_seen[1] = reading.priceBtc\n\n if reading.priceBtc < value_btc_seen[0]:\n value_btc_seen[0] = reading.priceBtc\n\n # 24h trading / mcap\n if fl_percent_24h_trading_volume_to_mcad != None:\n if not trading24tomcap or fl_percent_24h_trading_volume_to_mcad > trading24tomcap[1] or fl_percent_24h_trading_volume_to_mcad < trading24tomcap[0]:\n if not trading24tomcap:\n trading24tomcap = [fl_percent_24h_trading_volume_to_mcad , fl_percent_24h_trading_volume_to_mcad]\n else:\n if fl_percent_24h_trading_volume_to_mcad > trading24tomcap[1]:\n trading24tomcap[1] = fl_percent_24h_trading_volume_to_mcad\n\n if 
fl_percent_24h_trading_volume_to_mcad < trading24tomcap[0]:\n                                trading24tomcap[0] = fl_percent_24h_trading_volume_to_mcad\n\n            # mcap\n            if reading.marketCapUsd != None:\n                if not mcap_seen or reading.marketCapUsd > mcap_seen[1] or reading.marketCapUsd < mcap_seen[0]:\n                    if not mcap_seen:\n                        mcap_seen = [reading.marketCapUsd , reading.marketCapUsd]\n                    else:\n                        if reading.marketCapUsd > mcap_seen[1]:\n                            mcap_seen[1] = reading.marketCapUsd\n\n                        if reading.marketCapUsd < mcap_seen[0]:\n                            mcap_seen[0] = reading.marketCapUsd\n\n\n        if flt_max_24h_trading_volume_to_mcad_seen != None and self.alert_trading_volume_percent_th != None and \\\n            int(flt_max_24h_trading_volume_to_mcad_seen) > self.alert_trading_volume_percent_th:\n            print(\"-- ALERT %s 24h trading / mcap\" % (flt_max_24h_trading_volume_to_mcad_seen))\n\n        rank_most_recent_or_now = rs[0].rank\n        rank_oldest_logged = rs[len(rs)-1].rank\n\n\n        avg_24h_trading_volume_to_mcad = round(sum_24h_trading_volume_to_mcad / count_24h_trading_volume_to_mcad, 1) if count_24h_trading_volume_to_mcad else None\n\n        s_alert_rise_in_rank = \"\"\n        if rank_oldest_logged > rank_most_recent_or_now:\n            percent_rank_rise = int((rank_oldest_logged - rank_most_recent_or_now ) / rank_oldest_logged * 100)\n            if percent_rank_rise > self.alert_rank_rise_percent_th:\n\n                s_detection_word = \"Hey\"\n                if avg_24h_trading_volume_to_mcad != None and avg_24h_trading_volume_to_mcad > self.ONLY_PERFECT_IF_AVG_VOLUME_TO_MCAP_PERCENT_ABOVE_X:\n                    s_detection_word = \"Perfect\"\n                    self.l_tokens_perfect.append(which_symbol)\n\n                s_alert_rise_in_rank = \"%d) %s, %s rank rises from rank #%s to rank #%s (+%s positions - %s%%)\\r\\n\" % \\\n                    (self.i_alert_rise_in_rank_count+1, s_detection_word, reading, rank_oldest_logged, rank_most_recent_or_now, rank_oldest_logged - rank_most_recent_or_now, percent_rank_rise)\n\n\n        if s_alert_rise_in_rank != \"\":\n            self.i_alert_rise_in_rank_count += 1\n\n\n\n        print(\"=======================\\r\\n\"\n            \"Summary for %s:\\r\\n\"\n            \"%s Rank: #%s - #%s (latest rank: #%s)\\r\\n\"\n            \"%s Value: %s - %s BTC (latest value: %s BTC)\\r\\n\"\n            \"%s MCAP: %s - %s (latest Market Cap: %s)\\r\\n\"\n            \"%s 24h Trading / MCAP: %s%% - %s%% (latest Trading / MCAP: %s, Avg. Trading / MCAP: %s%% from %d readings)\\r\\n\"\n            \"%s\"%\n            (which_symbol,\n            which_symbol, rank_seen[0], rank_seen[1], rs[0].rank,\n            which_symbol, value_btc_seen[0], value_btc_seen[1], rs[0].priceBtc,\n            which_symbol, format_using_humanize(mcap_seen[0] if mcap_seen != None else None, humanize.intword), format_using_humanize(mcap_seen[1] if mcap_seen != None else None, humanize.intword),\n            format_using_humanize(rs[0].marketCapUsd, humanize.intword),\n            which_symbol, round(trading24tomcap[0],1) if trading24tomcap != None else None, round(trading24tomcap[1],1) if trading24tomcap != None else None, get_day_trading_of_mcap_percent_for_obj(obj=rs[0]),\n            avg_24h_trading_volume_to_mcad,\n            count_24h_trading_volume_to_mcad,\n            s_alert_rise_in_rank\n            )\n            )\n","sub_path":"cmc_tickers/tickers/management/commands/volume-pat.py","file_name":"volume-pat.py","file_ext":"py","file_size_in_byte":12407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"221733458","text":"import json\n\n\nclass GameStats:\n    \"\"\"Track statistics for Alien Invasion.\"\"\"\n\n    def __init__(self, ai_game):\n        \"\"\"Initialize statistics.\"\"\"\n        self.settings = ai_game.settings\n        self.reset_stats()\n        # Start game in an inactive state.\n        self.game_active = False\n        # High score should never be reset.\n        # Get High Score\n        try:\n            with open('high_score.json') as high_score:\n                self.high_score = int(json.load(high_score))\n        except (FileNotFoundError, ValueError):\n            # No saved score yet (or an unreadable one): start from zero.\n            self.high_score = 0\n            with open('high_score.json', 'w') as high_score:\n                json.dump('0', high_score)\n\n    def reset_stats(self):\n        \"\"\"Initialize statistics that can change during the game.\"\"\"\n        self.ships_left = self.settings.ship_limit\n        self.score = 0\n        self.level = 1\n","sub_path":"alien_invasion/game_stats.py","file_name":"game_stats.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
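A sketch of constructing the stats object above outside the full game; the stand-in classes are hypothetical, since GameStats only needs an object exposing `.settings.ship_limit`:

class _Settings:
    ship_limit = 3

class _Game:
    settings = _Settings()

stats = GameStats(_Game())
print(stats.ships_left, stats.score, stats.level)  # 3 0 1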
+{"seq_id":"395765507","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef f(t, v0, g):\n return v0*t - 0.5*g*t**2\n\n\nv0 = 10\ng = 9.81\nt = np.linspace(0, 2*v0/g, 51)\ny = f(t, v0=v0, g=g)\n\nplt.plot(t, y)\nplt.xlabel('time (s)')\nplt.ylabel('height (m)')\nplt.show()\n","sub_path":"lecture3/solutions/exercise_3_1.py","file_name":"exercise_3_1.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
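The analytic peak of the trajectory above is at t = v0/g with height v0**2/(2*g), and the t grid contains v0/g exactly (the midpoint of the linspace), so a one-line check against the sampled curve is possible:

# Sanity check (illustrative): the sampled maximum matches v0^2 / (2g).
print(max(y), v0**2 / (2 * g))  # both ~5.097 m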
+{"seq_id":"89440506","text":"from opytimizer.optimizers.wca import WCA\n\n# One should declare a hyperparameters object based\n# on the desired algorithm that will be used\nhyperparams = {\n 'nsr': 10,\n 'd_max': 0.1\n}\n\n# Creating a WCA optimizer\no = WCA(hyperparams=hyperparams)\n","sub_path":"examples/optimizers/create_wca.py","file_name":"create_wca.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"155407073","text":"from flask import Flask, session, render_template, Blueprint, abort, request, make_response, url_for, redirect, flash\nfrom app import db\nfrom app.service.orders import OrderService, OrderTempService\nfrom app.service.products import ProductService\nfrom app.service.report import ReportService\nfrom flask_login import current_user\norder_temp_service = OrderTempService(db)\norder_service = OrderService(db)\nproduct_service = ProductService(db)\nreport_service = ReportService()\n\norder = Blueprint('order', __name__, url_prefix='/orders')\n\n@order.route('/create', methods = ['POST'])\ndef create_order():\n    if request.method == 'POST':\n        product_id = int(request.form['product_id'])\n        if 'quantity' in request.form:\n            quantity = request.form['quantity']\n        else:\n            quantity = 1\n        product = product_service.find_product_by_id(product_id)\n        if current_user.is_authenticated:\n            order = order_temp_service.find_order_by_product_id(current_user.id, product_id)\n            if order:\n                order_temp_service.update(order, quantity)\n            else:\n                user_id = int(current_user.id)\n                price = product.price - round((product.price*product.sale)/100, 2)\n                order = order_temp_service.create_order_temp(user_id=user_id, product_id=product_id, quantity=quantity, price=price)\n                if order:\n                    flash(u'Add product to cart successfully', 'success')\n        else:\n            if 'orders_temps' not in session:\n                session['orders_temps'] = {}\n                session['orders_temps'][str(product_id)] = quantity\n            else:\n                session['orders_temps'][str(product_id)] = quantity\n            print(session['orders_temps'])\n        return redirect(url_for('products.checkout'))\n@order.route('/delete/<id>', methods = ['POST'])\ndef delete(id):\n    if id:\n        if current_user.is_authenticated:\n            order = order_temp_service.delete_order_temp_by_id(id)\n            if order:\n                flash(u'Delete product successfully', 'success')\n        else:\n            del session['orders_temps'][id]\n            print(session['orders_temps'])\n        return redirect(url_for('products.checkout'))\n    abort(404)\n\n@order.route('/save_change/<id>', methods = ['POST'])\ndef save_change(id):\n    if id:\n        quality = request.form['quality']\n        if current_user.is_authenticated:\n            order = order_temp_service.update(id, quantity=quality)\n            if order:\n                flash(u'update product successfully', 'success')\n            else:\n                flash(u'update product error', 'warning')\n        elif id in session['orders_temps']:\n            session['orders_temps'][id] = quality\n            flash(u'update product successfully', 'success')\n        return redirect(url_for('products.checkout'))\n    abort(404)\n\n@order.route(\"/download_invoice/<id>\")\ndef download_invoice(id):\n    if id:\n        order = order_service.find_order_by_id(id)\n        if order:\n            order_details = order.order_details\n            user_detail = current_user.user_detail\n            pdf = report_service.to_pdf(context={\n                'user_detail': user_detail,\n                'order_date': order.date_created.strftime(\"%b %d %Y\"),\n                'order_details': order.order_details})\n            response = make_response(pdf)\n            response.headers['Content-Type'] = 'application/pdf'\n            response.headers['Content-Disposition'] = 'attachment; filename=report.pdf'\n            return response\n        else:\n            abort(404)\n","sub_path":"app/view/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"84204037","text":"from __future__ import annotations\nimport io\nfrom pathlib import Path\nfrom typing import Union\n\nimport PIL\n\nfrom .errors import S3Error\nfrom .logger import simple_logger\n\nlog = simple_logger(\"imgserve.s3\")\n\n\ndef s3_put_image(\n    s3_client: botocore.clients.s3,\n    image: Union[PIL.Image.Image, Path, bytes],\n    bucket: str,\n    object_path: Path,\n    overwrite: bool = False,\n) -> None:\n\n    if isinstance(image, PIL.Image.Image):\n        image_bytes = io.BytesIO()\n        image.save(image_bytes, format=\"PNG\")\n        image_bytes = image_bytes.getvalue()\n    elif isinstance(image, Path):\n        image_bytes = image.read_bytes()\n    elif isinstance(image, bytes):\n        image_bytes = image\n    else:\n        raise ValueError(f\"{image} is not a known type\")\n\n    try:\n        # only write images to s3 that don't already exist unless overwrite is passed\n        try:\n            s3_client.get_object(Bucket=bucket, Key=str(object_path))\n            if not overwrite:\n                log.debug(f\"{object_path} already exists in s3, not overwriting\")\n                return\n        except s3_client.exceptions.NoSuchKey:\n            pass\n\n        s3_client.put_object(Body=image_bytes, Bucket=bucket, Key=str(object_path))\n        log.info(f\"uploaded {object_path} to s3.\")\n    except s3_client.exceptions.ClientError:\n        s3_client_attributes = {\n            attr: getattr(s3_client, attr) for attr in s3_client.__dict__.keys()\n        }\n        s3_client_attributes.update({\n            \"bucket\": bucket,\n            \"object_path\": object_path,\n        })\n        raise S3Error(f\"{s3_client_attributes} S3 ClientError\")\n\n\ndef get_s3_bytes(\n    s3_client: botocore.clients.s3, bucket_name: str, s3_path: Path\n) -> bytes:\n    return s3_client.get_object(Bucket=bucket_name, Key=str(s3_path))[\"Body\"].read()\n","sub_path":"src/imgserve/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
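A sketch of calling s3_put_image above with a real boto3 client; the bucket, key, and local file name are placeholders:

import boto3
from pathlib import Path

s3 = boto3.client("s3")  # standard boto3 client; credentials resolved as usual
# Pass a PIL image, a local Path, or raw bytes; existing keys are left alone
# unless overwrite=True.
s3_put_image(s3, Path("colorgram.png"), bucket="my-bucket",
             object_path=Path("experiments/colorgram.png"))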
+{"seq_id":"581201203","text":"\"\"\"\n\nhttps://www.youtube.com/watch?v=LRlDngwgiuw&list=PL1A2CSdiySGLPTXm0cTxlGYbReGqTcGRA&index=6\n\nModularizing, or splitting up a program into multiple files,\nallows for complex packages to be made;\ngood for writing tool packs for other developers\n\"\"\"\nimport vec2d\nfrom vec2d import Vector2D # imports one name (a bare * would load everything)\n\ndef Main():\n    vec1 = vec2d.Vector2D(5, 6) #module.classname\n    vec2 = vec2d.Vector2D(1, 1)\n\n    vec3 = Vector2D(8, 8)\n    vec4 = Vector2D(9, 9)\n    \n    print(vec1.x, \" \", vec1.y)\n    print(vec2.x, \" \", vec2.y)\n    print(vec3.x, \" \", vec3.y)\n    print(vec4.x, \" \", vec4.y)\n\nif __name__ == \"__main__\":\n    Main()\n","sub_path":"02_Med_Tutorial_06_ModulatingCode2.py","file_name":"02_Med_Tutorial_06_ModulatingCode2.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"591017874","text":"#!/usr/bin/env python3\n\nimport concurrent.futures, glob, json, os, re\nimport requests  # used by exec_ipynb for remote notebooks\n\ndef exec_ipynb(filename_or_url):\n    nb = (requests.get(filename_or_url).json() if re.match(r'https?:', filename_or_url) else json.load(open(filename_or_url)))\n    if(nb['nbformat'] >= 4):\n        src = [''.join(cell['source']) for cell in nb['cells'] if cell['cell_type'] == 'code']\n    else:\n        src = [''.join(cell['input']) for cell in nb['worksheets'][0]['cells'] if cell['cell_type'] == 'code']\n    exec('\\n'.join(src), globals())\n\n\nos.chdir(os.path.dirname(__file__))\nexec_ipynb('python-utils/utils.ipynb')\nexec(open('python-utils/config-utils.py').read(), globals())\n\nfor service in get_services():\n    subprocess_check('systemctl restart %s' % service, verbose=True)\n","sub_path":"restart.py","file_name":"restart.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"67266129","text":"from selenium import webdriver\nimport time\n\nbrowser = webdriver.Chrome()\nbrowser.get('https://www.w3schools.com/tags/tryit.asp?filename=tryhtml5_input_type_radio')\n\n# switch into the result iframe\nbrowser.switch_to.frame('iframeResult')\n# //*[@id=\"html\"]\nelem = browser.find_element_by_xpath('//*[@id=\"html\"]')\nelem.click()\n\nbrowser.switch_to.default_content() # step back out to the top-level document\n\ntime.sleep(5)\nbrowser.quit()\n","sub_path":"rpa_basic/3_web/4_iframe.py","file_name":"4_iframe.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"556823432","text":"'''\nCreated on Nov 20, 2013\n\n@author: samith\n'''\nimport os\nWEEKLY_ARFF_PATH = \"/home/samith/weekly.arff\"\nMONTHLY_ARFF_PATH = \"/home/samith/monthly.arff\"\nINDEX_COL = \"Date\"\nDATA_COL = \"frequency_twitter\"\nINPUT_QUEUE_END_IDENTIFIER = \"STOP\"\nOUTPUT_QUEUE_END_IDENTIFIER = \"STOP\"\nBASE_WEEKLY_ARFF_PATH = str(os.getcwd())+\"/arff/weekly/process_%d/\"\nBASE_MONTHLY_ARFF_PATH = str(os.getcwd())+\"/arff/monthly/process_%d/\"\nNUMBER_OF_FETCH_PROCESSES = 1\nNUMBER_OF_ANALYZE_PROCESSES = 2\nNUMBER_OF_OUTPUT_PROCESSES = 1\nINPUT_QUEUE_LIMIT = 30\nOUTPUT_QUEUE_LIMIT = 30\nFINALIZE_COUNT = 10\nRUN_MAIN_PROCESS = True\nMAIN_PROCESS_SLEEP_TIME = 3600\nTS_START_DATE = \"2011-09-01\"\nGATEWAY_SERVER_PORT = 25335\n\n# DB connection\nMYSQL_HOST_IP = \"192.248.8.247\"\nMYSQL_PORT = 3306\nMYSQL_USER = \"root\"\nMYSQL_PW = \"selchi123\"\nMYSQL_DB_PLACES = \"PlacesDB\"\nMYSQL_DB_ACTIVITY = \"ActivityDB\"\n\n# Ontology\nONTOLOGY_URL = \"http://192.248.8.252/\"\nUPDATE_URL = ONTOLOGY_URL+ 'TravelDataWebService/rest/updatetrends/%s/%s/%s/%s/%d/%d'\nFINALIZE_URL = ONTOLOGY_URL+ 'TravelDataWebService/rest/updatetrends/finalize'\n\ndef init_arff_locs(number_of_process):\n for i in range(number_of_process):\n weekly = BASE_WEEKLY_ARFF_PATH %(i)\n monthly= BASE_MONTHLY_ARFF_PATH % (i)\n for directory in [weekly ,monthly]:\n if not os.path.exists(directory):\n os.makedirs(directory)","sub_path":"TimeSeriesAnalysis/Python/TimeSeriesForecast/src/timeseriesforecast/main/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"398728445","text":"from flask import Flask\nimport config\nimport os\n\ndef config_app(app):\n config_name = os.getenv('FLASK_ENV', 'default')\n app.config.from_pyfile('config.py')\n app.config.from_object(config_env[config_name])\n\napp = Flask(__name__, instance_relative_config=True)\nconfig_env = {\n 'default': 'config.default.Config',\n 'development': 'config.development.DevelopmentConfig',\n 'production': 'config.production.ProductionConfig',\n 'testing': 'config.testing.TestingConfig'\n}\nconfig_app(app)\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"398539211","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#\n# Copyright © 2013-2014 Felix Crux and the Authors.\n# Released under the terms of the MIT License (Expat Version).\n# See the LICENSE and AUTHORS files for further details.\n#\n\n\nimport datetime\nimport os\nimport tempfile\nimport unittest\n\nimport libwedger\n\n\nclass TestGetTransactionRegister(unittest.TestCase):\n\n def setUp(self):\n self.ledger_file = tempfile.NamedTemporaryFile(\"w+\")\n os.environ[\"LEDGER_FILE\"] = self.ledger_file.name\n os.environ[\"HOME\"] = tempfile.gettempdir()\n\n def tearDown(self):\n self.ledger_file.close()\n\n def test_no_transactions(self):\n self.assertEqual(libwedger.get_transaction_register(), [])\n\n def test_one_transaction(self):\n self.ledger_file.write(\"2013-11-12 Writing Tests\\n\"\n \" Time -30 minutes\\n\"\n \" QA\")\n self.ledger_file.flush()\n self.assertEqual(\n libwedger.get_transaction_register(),\n [{\"date\": datetime.date(2013, 11, 12),\n \"payee\": \"Writing Tests\",\n \"accounts\": [{\"account\": \"Time\", \"amount\": \"-30 minutes\"},\n {\"account\": \"QA\", \"amount\": \"30 minutes\"}]}])\n\n def test_multiple_transactions(self):\n self.ledger_file.write(\"2013-11-12 Writing Tests\\n\"\n \" Time -30 minutes\\n\"\n \" QA\\n\"\n \"2013-11-11 Planning\\n\"\n \" Time -15 minutes\\n\"\n \" Plans\\n\")\n self.ledger_file.flush()\n self.assertEqual(\n libwedger.get_transaction_register(),\n [{\"date\": datetime.date(2013, 11, 12),\n \"payee\": \"Writing Tests\",\n \"accounts\": [{\"account\": \"Time\", \"amount\": \"-30 minutes\"},\n {\"account\": \"QA\", \"amount\": \"30 minutes\"}]},\n {\"date\": datetime.date(2013, 11, 11),\n \"payee\": \"Planning\",\n \"accounts\": [{\"account\": \"Time\", \"amount\": \"-15 minutes\"},\n {\"account\": \"Plans\", \"amount\": \"15 minutes\"}]}])\n\n def test_multiple_entries_per_transaction(self):\n self.ledger_file.write(\"2013-11-12 Writing Tests\\n\"\n \" Time -30 minutes\\n\"\n \" QA 15 minutes\\n\"\n \" Plans 5 minutes\\n\"\n \" Bugfixes\")\n self.ledger_file.flush()\n self.assertEqual(\n libwedger.get_transaction_register(),\n [{\"date\": datetime.date(2013, 11, 12),\n \"payee\": \"Writing Tests\",\n \"accounts\": [{\"account\": \"Time\", \"amount\": \"-30 minutes\"},\n {\"account\": \"QA\", \"amount\": \"15 minutes\"},\n {\"account\": \"Plans\", \"amount\": \"5 minutes\"},\n {\"account\": \"Bugfixes\", \"amount\": \"10 minutes\"}]}])\n\n def test_commodities_with_price_transaction(self):\n self.ledger_file.write(\"2013-11-12 Writing Tests\\n\"\n \" Time -30 minutes @ $1\\n\"\n \" QA\")\n self.ledger_file.flush()\n self.assertEqual(\n libwedger.get_transaction_register(),\n [{\"date\": datetime.date(2013, 11, 12),\n \"payee\": \"Writing Tests\",\n \"accounts\": [{\"account\": \"Time\", \"amount\": \"-30 minutes\"},\n {\"account\": \"QA\", \"amount\": \"$30\"}]}])\n","sub_path":"tst/test_get_transaction_register.py","file_name":"test_get_transaction_register.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"85060605","text":"from opengever.activity import notification_center\nfrom opengever.activity.roles import COMMITTEE_RESPONSIBLE_ROLE\nfrom opengever.activity.roles import PROPOSAL_ISSUER_ROLE\nfrom opengever.meeting.activity.helpers import get_users_by_group\n\n\ndef add_watchers_on_submitted_proposal_created(obj):\n groupid = obj.load_model().committee.group_id\n for user in get_users_by_group(groupid) or []:\n notification_center().add_watcher_to_resource(\n obj, user.userid, COMMITTEE_RESPONSIBLE_ROLE)\n\n\ndef remove_watchers_on_submitted_proposal_deleted(obj, groupid):\n for user in get_users_by_group(groupid) or []:\n notification_center().remove_watcher_from_resource(\n obj, user.userid, COMMITTEE_RESPONSIBLE_ROLE)\n\n\ndef add_watcher_on_proposal_created(obj):\n notification_center().add_watcher_to_resource(\n obj, obj.issuer, PROPOSAL_ISSUER_ROLE)\n\n\ndef change_watcher_on_proposal_edited(obj, new_userid):\n center = notification_center()\n center.remove_watcher_from_resource(\n obj, obj.issuer, PROPOSAL_ISSUER_ROLE)\n center.add_watcher_to_resource(\n obj, new_userid, PROPOSAL_ISSUER_ROLE)\n","sub_path":"opengever/meeting/activity/watchers.py","file_name":"watchers.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"33869533","text":"import cv2\nimport sys\nimport matplotlib.pyplot as plt\n# Get user supplied values\nimagePath = \"/home/hamza/Pictures/FaceDetect-master/abba.png\"#sys.argv[1]\ncascPath = \"haarcascade_frontalface_default.xml\"\n\n# Create the haar cascade\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\n# Read the image\nimage = cv2.imread(imagePath)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Detect faces in the image\nfaces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags = cv2.cv.CV_HAAR_SCALE_IMAGE\n)\n\nprint(\"Found {0} faces!\".format(len(faces)))\ninc = 121\n# Draw a rectangle around the faces\nfor (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n crop_img = image[y:y+h, x:x+w]\n # cv2.imshow(str(inc) , crop_img)\n # cv2.imwrite(str(inc) + '.png', crop_img)\n plt.subplot(inc), plt.imshow(crop_img),plt.title('as')\n inc = inc + 1\n # break\n # print inc\n\n\nplt.show()\n# cv2.imshow(\"croped\" , crop_img)plt.show()\n\n\n# cv2.imshow(\"Faces found\", image)\ncv2.waitKey(0)\n","sub_path":"FaceDetect-master/face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"506547898","text":"# NAME: Javier E. Zapanta (j.zapanta@snhu.edu)\n# DATE: 2019 May 14\n# COURSE: IT-140\n# PROGRAM: High Score\n#\n# PURPOSE: This program demonstrates the use of an if statement.\n# RUNTIME: Python 2+\n#\n# CREDIT: Gaddis, T. (2012). Starting Out with C++: From Control Structures Through Objects. Pearson Addison-Wesley. Retrieved from https://books.google.com/books?id=Xbt0uQAACAAJ\n\ndef main():\n \"\"\" Main function \n \"\"\"\n\n # Initialize high score\n HIGH_SCORE = 95\n\n # Display heading\n print(\"HIGH SCORE PROGRAM\\n\")\n\n # Display instructions\n print(\"Enter 3 test scores and I will average them\")\n\n # Get test scores.\n # NOTE: Using \"input\" function to get input from keyboard.\n # We will assume that data entered will be numerical test scores.\n score1 = float(input(\"Score 1: \"))\n score2 = float(input(\"Score 2: \"))\n score3 = float(input(\"Score 3: \"))\n\n # Calculate the average and display results\n average = (score1 + score2 + score3) / 3.0\n print (\"Your average is \" + str(average))\n\n # If the average is a high score, congratulate the user\n if (average > HIGH_SCORE):\n print (\"Congratulations! That's a high score!\")\n\n\n# See https://runestone.academy/runestone/static/thinkcspy/Functions/mainfunction.html\n#\n# call main function if this is the main file\nif __name__ == \"__main__\":\n # Go to Line 28 when \"main\" is called\n main()","sub_path":"module02/02_if_only.py","file_name":"02_if_only.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"448722734","text":"import glob\nimport matplotlib.pyplot as plt\n\nfrom scipy.io import mmread\nfrom sklearn.decomposition import PCA\n\npath = 'blood_downloads/*.mtx'\nfiles = glob.glob(path)\n\nmatrices = []\n\nprint(\"Reading files...\")\n\nfor file in files:\n file_name = file.split('/')[-1].split('.')[0]\n print(f\"\\t> Reading file {file}\")\n matrix = mmread(file)\n matrices.append({\n 'project_ID': file_name,\n 'matrix': matrix\n })\n\n\nprojected_matrices = []\n\nprint(\"Principal component analysis...\")\n\nfor matrix in matrices:\n print(f\"\\t> Doing PCA of {matrix['project_ID']}\")\n\n pca = PCA(n_components=2)\n projected = pca.fit_transform(matrix['matrix'].toarray())\n\n matrix['PCA_projected'] = projected\n\nprint(\"Plotting 2 PCs...\")\n\nfor matrix in matrices:\n print(f\"\\t> Plotting PC1 and PC2 of {matrix['project_ID']}\")\n plt.scatter(matrix['PCA_projected'][:, 0], matrix['PCA_projected'][:, 1], alpha=0.5, label=matrix['project_ID'])\n\n plt.savefig('blood_downloads/' + matrix['project_ID'] + '.png')","sub_path":"Experiments/Clustering.py","file_name":"Clustering.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"603515306","text":"rng1 = range(1, 6)\r\nlst1 = list(rng1)\r\nrng2 = range(6, 11)\r\nlst2 = list(rng2)\r\n\r\n\r\ndef tong_lst(lst1):\r\n total = 0\r\n for e in lst1:\r\n total = total + e\r\n return total\r\n\r\n\r\nprint(tong_lst(lst1))\r\n\r\n\r\ndef gom_lst(lst1, lst2):\r\n lst3 = []\r\n for e in lst1:\r\n lst3.append(e)\r\n for e in lst2:\r\n lst3.append(e)\r\n return lst3\r\n\r\n\r\nprint(gom_lst(lst1, lst2))\r\n\r\n\r\n# Dựa vào hàm 2 hàm trên, viết hàm tongNhieuArray: - nhận vào 2 array integer - trả về tổng các phần tử\r\ndef tong_nhieu_lst(lst1, lst2):\r\n tong = 0\r\n for e in lst1:\r\n tong = tong + e\r\n for e in lst2:\r\n tong = tong + e\r\n return tong\r\n\r\n\r\nprint(tong_nhieu_lst(lst1, lst2))\r\n\r\n\r\n# Viết hàm inArray: - nhận vào 1 array integer - in ra toàn bộ các phần tử của array input - ko trả về bất cứ cái gì\r\ndef in_lst(integ):\r\n for e in integ:\r\n print(e)\r\n\r\n\r\nin_lst(lst1)\r\n\r\nx = 200\r\n\r\n\r\nclass Point:\r\n def __init__(self, x, y, name):\r\n self.x = x\r\n self.y = y\r\n self.name = name\r\n\r\n\r\ndef test_point():\r\n diem = Point(9, 89, \"meo\")\r\n print(diem.x)\r\n print(diem.name)\r\n diem.x = 150\r\n print(diem.x)\r\n\r\n diem_3 = Point(name=\"Ngâu\", y=65, x=45)\r\n print(diem_3.name)\r\n\r\n\r\ntest_point()\r\n\r\n\r\nclass Line:\r\n def __init__(self, point1, point2):\r\n self.point_1 = point1\r\n self.point_2 = point2\r\n\r\n\r\ndef test_line():\r\n diem1 = Point(8, 6, \"point1\")\r\n diem2 = Point(6, 3, \"point2\")\r\n dong = Line(Point(8, 6, \"point1\"), Point(6, 3, \"point2\"))\r\n dong_1 = Line(point1=Point(70, 80, \"500\"), point2=Point(79, 86, \"502\"))\r\n\r\n\r\ntest_line()\r\n\r\n\r\nclass Job:\r\n def __init__(self, tieu_de, lg_gio, gio_bd, gio_kt):\r\n self.title = tieu_de\r\n self.salary = lg_gio\r\n self.startingWorkingTime = gio_bd\r\n self.endingWorkingTime = gio_kt\r\n\r\n @staticmethod\r\n def create_job(ten_cv,lg_gio,gio_bd,gio_kt):\r\n jo = Job(ten_cv,lg_gio,gio_bd,gio_kt)\r\n return jo\r\n\r\n\r\ndef test_job():\r\n cv = Job(\"BA\", 200, 8, 18)\r\n cv_1 = Job(tieu_de=\"DA\", lg_gio=600, gio_bd=9, gio_kt=18)\r\n\r\n\r\nclass Person:\r\n def __init__(self):\r\n self.__name\r\n self._age\r\n self.job\r\n self.startWorkingDay\r\n\r\n\r\ndef bai_09_07():\r\n index_1 = \"A\"\r\n index_2 = \"QQ1\"\r\n index_3 = index_1 + index_2\r\n str = \"123,44,55,88\"\r\n lst_1 = str.split(',')\r\n for e in lst_1:\r\n kq = index_3 + e\r\n print(kq)\r\n\r\n\r\nbai_09_07()\r\n","sub_path":"folder1/BT_06_07.py","file_name":"BT_06_07.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"148488620","text":"# -*- coding: utf-8 -*-\n\nfrom rest_framework import serializers\n\nfrom tests.models import Book\n\n\nclass BookSerializer(serializers.ModelSerializer):\n tags = serializers.StringRelatedField(many=True)\n\n class Meta:\n model = Book\n fields = (\n 'id',\n 'tags',\n 'title',\n )\n","sub_path":"tests/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"419282888","text":"import pylab\nimport networkx as nx\nimport csv\nimport numpy as np\n\nG = nx.Graph()\nD = nx.DiGraph()\n\ndef newgraph(G):\n G.clear()\n with open(input(\"Please enter your filepath: \")) as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n source = []\n target = []\n value = []\n\n for row in reader:\n source.append(int(row[0]))\n target.append(int(row[1]))\n value.append(int(row[2]))\n\n for i in range(0, np.size(target) + 1):\n G.add_node(i)\n\n for i in range(np.size(source)):\n G.add_weighted_edges_from([(source[i], target[i], value[i])])\n print(source[i],target[i],value[i])\n\ndef newdigraph(D):\n D.clear()\n with open(input(\"Please enter your filepath: \")) as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n source = []\n target = []\n value = []\n\n for row in reader:\n source.append(int(row[0]))\n target.append(int(row[1]))\n value.append(int(row[2]))\n\n for i in range(0, np.size(target) + 1):\n D.add_node(i)\n\n for i in range(np.size(source)):\n D.add_weighted_edges_from([(source[i], target[i], value[i])])\n print(source[i],target[i],value[i])\n\ndef vitality(G):\n newgraph(G)\n selected_node = int(input(\"Enter the vitality of the node you wish to see: \"))\n print(nx.closeness_vitality(G,selected_node))\n\n # shows graph with the node removed\n G.remove_node(selected_node)\n pos = nx.spring_layout(G)\n nx.draw(G, pos, with_labels=True, node_color='b', edge_color='k', node_size=200, alpha=0.5)\n pylab.title('Self_Define Net', fontsize=15)\n pylab.show()\n\nvitality(G)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"624994710","text":"import uuid\nfrom datetime import timedelta\n\nimport pytest\nfrom Crypto.Random import get_random_bytes\nfrom django.db import IntegrityError\nfrom django.test import Client\nfrom django.utils import timezone\nfrom freezegun import freeze_time\nfrom jsonrpc.proxy import TestingServiceProxy\n\n# MONKEY-PATCH django-jsonrpc package so that it uses Extended Json on proxy requests\nfrom bson.json_util import dumps, loads\nfrom jsonrpc import proxy\n\nfrom wacryptolib.encryption import _encrypt_via_rsa_oaep\nfrom wacryptolib.key_generation import load_asymmetric_key_from_pem_bytestring\nfrom wacryptolib.signature import verify_message_signature\nfrom waescrow import escrow_api\nfrom waescrow.escrow_api import SqlKeyStorage\nfrom waescrow.models import EscrowKeypair\n\nassert proxy.loads\nproxy.loads = loads\nassert proxy.dumps\nproxy.dumps = dumps\n\n\ndef _get_jsonrpc_result(response_dict):\n assert isinstance(response_dict, dict)\n assert \"error\" not in response_dict\n return response_dict[\"result\"]\n\n\ndef test_sql_key_storage(db):\n\n key_storage = SqlKeyStorage()\n\n keychain_uid1 = uuid.uuid4()\n keychain_uid2 = uuid.uuid4()\n keychain_uid_unexisting = uuid.uuid4()\n\n key_storage.set_keypair(keychain_uid=keychain_uid1, key_type=\"RSA\", keypair=dict(a=2))\n key_storage.set_keypair(keychain_uid=keychain_uid2, key_type=\"RSA\", keypair=dict(B=\"xyz\"))\n key_storage.set_keypair(keychain_uid=keychain_uid1, key_type=\"DSA\", keypair=dict(c=b\"99\"))\n key_storage.set_keypair(keychain_uid=keychain_uid2, key_type=\"DSA\", keypair=dict(D=1.0))\n\n assert key_storage.get_keypair(keychain_uid=keychain_uid1, key_type=\"RSA\") == dict(a=2)\n assert key_storage.get_keypair(keychain_uid=keychain_uid2, key_type=\"RSA\") == dict(B=\"xyz\")\n assert key_storage.get_keypair(keychain_uid=keychain_uid1, key_type=\"DSA\") == dict(c=b\"99\")\n assert key_storage.get_keypair(keychain_uid=keychain_uid2, key_type=\"DSA\") == dict(D=1.0)\n\n assert key_storage.get_keypair(keychain_uid=keychain_uid_unexisting, key_type=\"RSA\") == None\n\n with pytest.raises(IntegrityError): # Final tests, since it breaks current DB transaction\n key_storage.set_keypair(keychain_uid=keychain_uid1, key_type=\"RSA\", keypair=dict(a=3))\n\n\ndef test_waescrow_escrow_api_workflow(db):\n\n escrow_proxy = TestingServiceProxy(\n client=Client(), service_url=\"/json/\", version=\"2.0\"\n )\n\n keychain_uid = uuid.uuid4()\n key_type= \"RSA\"\n secret = get_random_bytes(101)\n\n public_key_pem = _get_jsonrpc_result(escrow_proxy.get_public_key(keychain_uid=keychain_uid, key_type=\"RSA\"))\n public_key = load_asymmetric_key_from_pem_bytestring(\n key_pem=public_key_pem, key_type=key_type\n )\n\n signature = _get_jsonrpc_result(escrow_proxy.get_message_signature(\n keychain_uid=keychain_uid, message=secret, key_type=key_type, signature_algo=\"PSS\"\n ))\n verify_message_signature(\n message=secret, signature=signature, key=public_key, signature_algo=\"PSS\"\n )\n\n signature[\"digest\"] += b\"xyz\"\n with pytest.raises(ValueError, match=\"Incorrect signature\"):\n verify_message_signature(\n message=secret, signature=signature, key=public_key, signature_algo=\"PSS\"\n )\n\n cipherdict = _encrypt_via_rsa_oaep(plaintext=secret, key=public_key)\n\n def _attempt_decryption():\n return escrow_proxy.decrypt_with_private_key(\n keychain_uid=keychain_uid, key_type=key_type, encryption_algo=\"RSA_OAEP\", cipherdict=cipherdict\n )\n\n with freeze_time() as frozen_datetime:\n\n with pytest.raises(RuntimeError, 
match=\"Decryption not authorized\"):\n _attempt_decryption()\n\n keypair_obj = EscrowKeypair.objects.get(keychain_uid=keychain_uid, key_type=key_type)\n keypair_obj.decryption_authorized_at = timezone.now() + timedelta(hours = 2)\n keypair_obj.save()\n\n with pytest.raises(RuntimeError, match=\"Decryption authorization is not currently active\"):\n _attempt_decryption() # Too early\n\n frozen_datetime.tick(delta=timedelta(hours=3))\n\n decrypted = _get_jsonrpc_result(_attempt_decryption())\n assert decrypted == secret # It works!\n\n cipherdict[\"digest_list\"].append(b\"aaabbbccc\")\n with pytest.raises(ValueError, match=\"Ciphertext with incorrect length\"):\n # Django test client reraises signalled exception\n escrow_proxy.decrypt_with_private_key(\n keychain_uid=keychain_uid, key_type=key_type, encryption_algo=\"RSA_OAEP\", cipherdict=cipherdict\n )\n\n frozen_datetime.tick(delta=timedelta(hours=24)) # We hardcode DECRYPTION_AUTHORIZATION_LIFESPAN_H here\n\n with pytest.raises(RuntimeError, match=\"Decryption authorization is not currently active\"):\n _attempt_decryption() # Too late, cipherdict is not even used so no ValueError\n\n keypair_obj.decryption_authorized_at = None\n keypair_obj.save()\n\n with pytest.raises(RuntimeError, match=\"Decryption not authorized\"):\n _attempt_decryption() # No more authorization at all\n\n\n","sub_path":"tests/test_waserver_escrow_api.py","file_name":"test_waserver_escrow_api.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"285171722","text":"from io import StringIO\n\nimport ruamel.yaml\n\nyaml = ruamel.yaml.YAML(typ='rt')\nyaml.width = 4096\nyaml.indent(mapping=2, sequence=4, offset=2)\nyaml.default_style='\"'\n\nwith open('base_groups.txt') as file:\n data = file.read().split('\\n')\n\ngroups = []\n\nfor l in data:\n\n groups.append({\n 'directory_group': l,\n 'adobe_groups': [l]\n })\n\ns = StringIO()\nyaml.dump(groups, s)\nd = (s.getvalue()\n .replace('\"directory_group\"','directory_group')\n .replace('\"adobe_groups\"','adobe_groups'))\n\nwith open('results.yaml', 'w') as results:\n results.write(d)\n","sub_path":"auto_group_generator.py","file_name":"auto_group_generator.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"639831880","text":"import pygame\npygame.init()\n\nscreen = pygame.display.set_mode((640, 480))\npygame.display.set_caption(\"Using a multi-image master file\")\n\nclass Chopper(pygame.sprite.Sprite):\n\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.loadImages()\n\n self.frame = 0\n self.delay = 3\n self.pause = 0\n\n self.image = self.imgList[0]\n self.rect = self.image.get_rect()\n\n self.rect.center = (screen.get_width()/2, screen.get_height()/2)\n\n def loadImages(self):\n imgMaster = pygame.image.load(\"heli3.bmp\")\n imgMaster = imgMaster.convert()\n\n self.imgList = []\n\n\n imgSize = [(128, 64), (128, 64), (128,64), (128, 64)]\n offset = [(2, 78), (134, 78), (266, 78), (398, 78)]\n\n for i in range(4):\n tmpImg = pygame.Surface(imgSize[i])\n\n tmpImg.blit(imgMaster, (0, 0), (offset[i], imgSize[i]))\n transColor = tmpImg.get_at((1, 1))\n tmpImg.set_colorkey(transColor)\n self.imgList.append(tmpImg)\n\n def update(self):\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_RIGHT]:\n self.rect.centerx += 5\n if keys[pygame.K_LEFT]:\n self.rect.centerx -= 5\n if keys[pygame.K_UP]:\n self.rect.centery -= 5\n if keys[pygame.K_DOWN]:\n self.rect.centery += 5\n\n\n if self.rect.left < 0:\n self.rect.left = 0\n if self.rect.right > screen.get_width():\n self.rect.right = screen.get_width()\n if self.rect.top < 0:\n self.rect.top = 0\n if self.rect.bottom > screen.get_height():\n self.rect.bottom = screen.get_height()\n\n self.pause += 1\n if self.pause >= self.delay:\n self.pause = 0\n self.frame += 1\n if self.frame >= len(self.imgList):\n self.frame = 0\n\n self.image = self.imgList[self.frame]\n oldCenter = self.rect.center\n self.rect = self.image.get_rect()\n self.rect.center = oldCenter\n\ndef main():\n\n background = pygame.Surface(screen.get_size())\n background.fill((155, 155, 255))\n screen.blit(background, (0,0))\n\n chopper = Chopper()\n allSprites = pygame.sprite.Group(chopper)\n\n clock = pygame.time.Clock()\n keepGoing = True\n\n while keepGoing:\n clock.tick(30)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n keepGoing = False\n\n allSprites.clear(screen, background)\n allSprites. update()\n allSprites.draw(screen)\n pygame.display.flip()\n\nmain()\npygame.quit()\n","sub_path":"computer-science-i/misc/Heli.py","file_name":"Heli.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"618701554","text":"from Robinhood import Robinhood\nimport json\nimport time\n\nwith open('config.json') as json_config_file:\n config = json.load(json_config_file)\n\ninstruments = {\n 'SYMC': 'https://api.robinhood.com/instruments/db06dee9-e342-4670-afb4-d6af005f3780/',\n 'P': 'https://api.robinhood.com/instruments/d15643d2-545c-4b83-a290-f85fe4575b0e/',\n 'UVXY': 'https://api.robinhood.com/instruments/00e90099-4281-4c93-b50d-fbd4d2469821/',\n}\n\n# robinhood\nmy_trader = Robinhood()\nmy_trader.login(username=config[\"rh_username\"],\n password=config[\"rh_password\"])\n\n\ndef buy_stock_market_price(symbol, cur_price):\n try:\n print('--------->buy', symbol, cur_price)\n stock_instrument = my_trader.instruments(symbol)[0]\n print('stock_instrument', stock_instrument)\n buy_order = my_trader.place_buy_order(stock_instrument, 1, cur_price)\n print(\"buy_order\", buy_order)\n except Exception as e:\n print(e)\n\n\ndef sell_stock_market_price(symbol, cur_price):\n try:\n print('--------->sell', symbol, cur_price)\n stock_instrument = my_trader.instruments(symbol)[0]\n print('stock_instrument', stock_instrument)\n sell_order = my_trader.place_sell_order(stock_instrument, 1, cur_price)\n print(\"sell_order\", sell_order)\n except Exception as e:\n print(e)\n\n\ndef get_last_order_id():\n order_history = my_trader.order_history()\n last_order = order_history['results'][0]\n print('last_order', last_order)\n return last_order['id']\n\n\ndef getInstrument(symbol):\n stock_instruments = my_trader.instruments(symbol)\n for instrument in stock_instruments:\n if instrument['symbol'] == symbol:\n return instrument\n\n\ndef limit_buy(symbol, price, buy_count):\n try:\n print('--------->limit buy', symbol, price)\n stock_instrument = getInstrument(symbol)\n print('stock_instrument', stock_instrument)\n buy_order = my_trader.place_order(\n stock_instrument,\n buy_count,\n price,\n 'buy',\n 'immediate',\n 'limit',\n 'gfd')\n print(\"buy_order\", buy_order)\n return get_last_order_id()\n except Exception as e:\n print(e)\n\n\ndef limit_sell(symbol, price, sell_count):\n try:\n print('--------->limit sell', symbol, price)\n stock_instrument = getInstrument(symbol)\n print('stock_instrument', stock_instrument)\n instrument_URL = stock_instrument['url']\n order = my_trader.place_limit_sell_order(\n instrument_URL,\n symbol,\n 'GFD',\n price,\n sell_count\n )\n print(\"sell_order\", order)\n return get_last_order_id()\n except Exception as e:\n print(e)\n\n\ndef cancelOrder(order_id):\n print('cancel_order id:', order_id)\n order = get_order(order_id)\n print('order', order)\n cancel_link = order['cancel']\n res = my_trader.session.post(cancel_link, timeout=15)\n res.raise_for_status()\n return res\n\n\ndef is_order_fulfilled(order_id):\n order = get_order(order_id)\n executions = order['executions']\n if len(executions) > 0:\n print('executions', executions)\n return float(executions[0]['price'])\n return False\n\n\ndef get_order(order_id):\n return my_trader.order_history(order_id)\n # last_order = order_history['results'][0]\n # print('last_order', last_order)\n # order_id, executions, cancel_link = last_order['id'], last_order['executions'], last_order['cancel']\n\n # if len(executions) > 0:\n # \treturn executions[0]['price'], None\n\n # count += 1\n # if count >= max_count:\n # \treturn None, cancel_link\n # time.sleep(interval_sec)\n\n\ndef get_quote(symbol):\n quote_info = my_trader.quote_data(symbol)\n print(\"quote_info\", quote_info)\n return quote_info['last_trade_price']\n\n\ndef 
get_last_extended_hours_quote(symbol):\n quote_info = my_trader.quote_data(symbol)\n print(\"quote_info\", quote_info)\n return quote_info['last_extended_hours_trade_price']\n","sub_path":"trader/rh_stock_trader.py","file_name":"rh_stock_trader.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"278677607","text":"import sys\nfrom twython import Twython\n\nCONSUMER_KEY = 'lygeb5MmWPyTLZidxgSXHdFno'\nCONSUMER_SECRET = 'JmgS5lUfUT0ZrB36hYQJ5cOINpuM4lOEnbBXwESAefVOGslJG8'\nACCESS_TOKEN = '737403682111643648-PIeyxkm7XPj9ARta66Ly6AQJGQJPEz3'\nACCESS_SECRET = 'yy8qFkJ0TJhmx6DrfVs4FWneBwdyqxVRq2gfxGTZ0CtrL'\n\napi = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET)\n\n#gif = open('/home/pi/Weather Gifs/hot/200.gif', 'rb')\nresponse = api.upload_media(media=gif)\n\napi.update_status(status='Checkout this cool image!', media_ids=[response['media_id']])","sub_path":"TestTweetGifByTime.py","file_name":"TestTweetGifByTime.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"562973681","text":"\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dsets\nfrom torch.autograd import Variable\n\n#transform makes sure it converts the MNIST dataset to a tensor\n#download = True we have to download it, if we're doing for the first time\ntrain_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)\n\ntest_dataset = dsets.MNIST(root='./data', train=False, transform = transforms.ToTensor())\n\nbatch_size = 100\nn_iters = 3000\nnum_epochs = n_iters/ (len(train_dataset)/batch_size)\nnum_epochs = int(num_epochs)\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle = True) \n#create iterable object : training dataset\n\n#create iterable object : training dataset\ntest_loader=torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n\n#Building model\n\nclass LogisticRegressionModel(nn.Module):\n def __init__(self, input_dim, output_dim):\n super(LogisticRegressionModel,self).__init__()\n self.linear = nn.Linear(input_dim, output_dim)\n \n def forward (self,x):\n out = self.linear(x)\n return out\n\ninput_dim=28*28\noutput_dim=10\nmodel = LogisticRegressionModel(input_dim, output_dim)\n\nmodel.cuda()\n\ncriterion = nn.CrossEntropyLoss() #don't use MSE like linear regresssion\n\nlearning_rate=.001\noptimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)\n\niter = 0\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n # Load images as Variable\n images = Variable(images.view(-1, 28*28).cuda())\n labels = Variable(labels.cuda())\n \n # Clear gradients w.r.t. parameters\n optimizer.zero_grad()\n \n # Forward pass to get output/logits\n outputs = model(images)\n \n # Calculate Loss: softmax --> cross entropy loss\n loss = criterion(outputs, labels)\n \n # Getting gradients w.r.t. parameters\n loss.backward()\n \n # Updating parameters\n optimizer.step()\n \n iter += 1\n \n if iter % 500 == 0:\n # Calculate Accuracy\n correct = 0\n total = 0\n # Iterate through test dataset\n for images, labels in test_loader:\n # Load images to a Torch Variable\n images = Variable(images.view(-1, 28*28).cuda())\n \n # Forward pass only to get logits/output\n outputs = model(images.cuda())\n \n # Get predictions from the maximum value\n _, predicted = torch.max(outputs.data, 1)\n \n # Total number of labels\n total += labels.size(0)\n \n # Total correct predictions\n correct += (predicted.cpu() == labels.cpu()).sum()\n \n accuracy = 100 * correct / total\n \n # Print Loss\n print(\"Iteration: {}. Loss: {}. Accuracy: {}\".format(iter, loss.item(), accuracy))\n\n","sub_path":"logistic_regression_pytorch.py","file_name":"logistic_regression_pytorch.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"199393673","text":"# L = []\n# n =1\n# while n<=99:\n# L.append(n)\n# n = n + 2\n# L_half = L[::2]\n# L_3 = L[3:]\n# print(L_half)\n\n# L1 = ['Hello', 'World', 18, 'Apple', None]\n# L2 = [s.lower() for s in L1 if isinstance(s, str)]\n# print(L2)\n\n# def triangles():\n# r = [1]\n# while True:\n# yield r\n# r1=[0]+r\n# r2=r+[0]\n# r=[r1[i]+r2[i] for i in range(len(r1))]\n\n\n# def add(a, b, f):\n# \treturn f(a) + f(b)\n\n# print add(56, -33, abs)\n\n# L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\n#\n#\n# def by_name(t):\n# return t[0]\n#\n#\n# def by_score(t):\n# return t[1] * -1\n#\n#\n# L2 = sorted(L, key=by_name)\n# L3 = sorted(L, key=by_score)\n#\n# print(L2, L3)\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# from datetime import datetime\n# import os\n#\n# pwd = os.path.abspath('.')\n#\n# print(' Size Last Modified Name')\n# print('------------------------------------------------------------')\n#\n# for f in os.listdir(pwd):\n# fsize = os.path.getsize(f)\n# mtime = datetime.fromtimestamp(os.path.getmtime(f)).strftime('%Y-%m-%d %H:%M')\n# flag = '/' if os.path.isdir(f) else ''\n# print('%10d %s %s%s' % (fsize, mtime, f, flag))\n\nfrom multiprocessing import Process\nimport os\n\n\ndef run_proc(name):\n print('Run child process %s (%s)...' % (name, os.getpid()))\n\nif __name__ == '__main__':\n print('Parent process %s.' % os.getpid())\n p = Process(target=run_proc, args=('test',))\n print('Child process will start.')\n p.start()\n p.join()\n print('Child process end.')\n","sub_path":"python/Demos/demo_temp.py","file_name":"demo_temp.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"188616039","text":"# -*- encoding=utf8 -*-\n__author__ = \"xuchu\"\n__title__ = \"Luka Reading 2.0\"\n__desc__ = \"Design by Xuchu\"\n\nimport time\nfrom airtest.core.api import *\nfrom poco.drivers.ios import iosPoco\npoco = iosPoco()\nusing(\"commonIOS.air\")\nfrom commonIOS import *\n\ndef Suite_FamilyLoop_Join():\n \"\"\"加入家庭圈\"\"\"\n # searchNumber = \"18513984761\"\n searchNumber = \"18800000002\"\n Suite_FamilyLoop_Manage_Init()\n poco(\"Other\").child(\"Other\").child(\"Other\").child(\"Other\").child(\"Button\")[1].click()\n poco(\"TextField\").click()\n text(searchNumber)\n poco(\"Button\").click()\n\n if not poco(\"暂时还没有家庭圈哦~\").exists():\n elSearchFamilyLoop = poco(\"Table\").children()\n countFamilyLoop = len(elSearchFamilyLoop) - 1\n print(\"[家庭圈数量]\", countFamilyLoop)\n print(\"[申请加入]\")\n poco(\"Table\").child(\"Cell\")[countFamilyLoop - 1].child(\"Button\").click()\n if poco(\"确定\").exists():\n poco(\"确定\").click()\n poco(\"close page\").click()\n click_by_name(\"arrow back\")\n click_by_name(\"arrow back\")\n else:\n print(\"确定button未出现,可能是待通过状态,现在返回家庭圈首页\")\n poco(\"close page\").click()\n click_by_name(\"arrow back\")\n click_by_name(\"arrow back\")\n else:\n print(\"[{0}]暂时没有家庭圈!\".format(searchNumber))\n poco(\"close page\").click()\n poco(\"arrow back\").click()\n poco(\"arrow back\").click()\n\nSuite_FamilyLoop_Join()","sub_path":"IOS/Suite_FamilyLoop_Join.air/Suite_FamilyLoop_Join.py","file_name":"Suite_FamilyLoop_Join.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"335923227","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n给你二叉树的根节点 root ,返回它节点值的 前序 遍历。\n\n链接:https://leetcode-cn.com/problems/binary-tree-preorder-traversal/\n\"\"\"\nfrom typing import List\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def preorderTraversal(self, root: TreeNode) -> List[int]:\n \"\"\"\n 思路:\n 根 -> 左 -> 右\n 1。 递归方法和中序遍历相似\n\n 2。 非递归方法,类似层序遍历,使用辅助 stack, 依次放入 [root.left, root.right],依次遍历 stack 中的元素\n 直到 stack 为空\n \"\"\"\n\n path, stack = [], [root]\n if not root:\n return path\n\n # 因为 stack 先入后出的性质,所以先 push root.right, 再 push root.left\n while stack:\n root = stack.pop()\n path.append(root.val)\n if root.right:\n stack.append(root.right)\n if root.left:\n stack.append(root.left)\n return path\n\n\n return path","sub_path":"Week_02/preorderTraversal.py","file_name":"preorderTraversal.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"401153425","text":"import os\nimport asyncio\nimport numpy as np\nimport time\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom M1M3_FATABLE import *\n\nfrom lsst.ts import salobj\nfrom lsst.ts.idl.enums import MTM1M3\nfrom lsst.ts.idl.enums import MTHexapod\n\nm3ORC = 2.508\nm3IRC = 0.550\nm1ORC = 4.18\nm1IRC = 2.558\n\nfat = np.array(FATABLE)\nm1m3_actID = np.int16(fat[:, FATABLE_ID])\nm1m3_nActuator = m1m3_actID.shape[0]\nm1m3_xact = np.float64(fat[:, FATABLE_XPOSITION])\nm1m3_yact = np.float64(fat[:, FATABLE_YPOSITION])\n\naa = np.loadtxt('%s/notebooks/M2_FEA/data/M2_1um_72_force.txt'%(os.environ[\"HOME\"]))\n# to have +x going to right, and +y going up, we need to transpose and reverse x and y\nm2_xact = -aa[:,2]\nm2_yact = -aa[:,1]\n\nclass MyLogHandler:\n def __init__(self, nmsg):\n self.nmsg = nmsg\n self.nprint = 0\n def printLogMessage(self, data):\n if self.nprint < self.nmsg:\n print(f\"{data.level}: {data.message}\")\n self.nprint += 1\n\nasync def checkAOSCompStates(m1m3, m2, camhex, m2hex):\n\n #m1m3\n sstate = await m1m3.evt_summaryState.aget(timeout=5)\n print('starting with: M1M3 state',salobj.State(sstate.summaryState), pd.to_datetime(sstate.private_sndStamp, unit='s'))\n dstate = await m1m3.evt_detailedState.aget(timeout=200)\n print(' detailed state', MTM1M3.DetailedState(dstate.detailedState), \n pd.to_datetime(dstate.private_sndStamp, unit='s'))\n\n #m2\n sstate = await m2.evt_summaryState.aget(timeout=5)\n print('staring with: m2 state',salobj.State(sstate.summaryState), pd.to_datetime(sstate.private_sndStamp, unit='s'))\n\n #camhex\n state = await camhex.evt_summaryState.aget(timeout=5)\n print('staring with: cam hex state', salobj.State(state.summaryState), pd.to_datetime(state.private_sndStamp, unit='s'))\n dstate = await camhex.evt_controllerState.aget(timeout=5)\n print(' cam hex state', MTHexapod.EnabledSubstate(dstate.enabledSubstate), \n pd.to_datetime(dstate.private_sndStamp, unit='s'))\n\n #m2hex\n state = await m2hex.evt_summaryState.aget(timeout=5)\n print('staring with: M2 hex state', salobj.State(state.summaryState), pd.to_datetime(state.private_sndStamp, unit='s'))\n dstate = await m2hex.evt_controllerState.aget(timeout=5)\n print(' hex state', MTHexapod.EnabledSubstate(dstate.enabledSubstate), \n pd.to_datetime(dstate.private_sndStamp, unit='s')) \n \nasync def checkAOSSummaryStates(aos, m1m3, m2, camhex, m2hex):\n #aos\n sstate = await aos.evt_summaryState.aget(timeout=5)\n print('starting with: MTAOS state',salobj.State(sstate.summaryState), pd.to_datetime(sstate.private_sndStamp, unit='s'))\n\n await checkAOSCompStates(m1m3, m2, camhex, m2hex)\n\nasync def checkSlewCompStates(ptg, mount, rot):\n \n sstate = await ptg.evt_summaryState.aget(timeout=5)\n print('staring with: ptg state',salobj.State(sstate.summaryState), pd.to_datetime(sstate.private_sndStamp, unit='s'))\n \n sstate = await mount.evt_summaryState.aget(timeout=5)\n print('staring with: mount state',salobj.State(sstate.summaryState), pd.to_datetime(sstate.private_sndStamp, unit='s'))\n\n sstate = await rot.evt_summaryState.aget(timeout=5)\n print('staring with: rot state',salobj.State(sstate.summaryState), pd.to_datetime(sstate.private_sndStamp, unit='s'))\n\nasync def lowerM1M3(m1m3):\n m1m3.evt_detailedState.flush()\n await m1m3.cmd_lowerM1M3.set_start(lowerM1M3=True, timeout = 30)\n while True:\n state = await m1m3.evt_detailedState.next(flush=False, timeout=300)\n print('m1m3 state', MTM1M3.DetailedState(state.detailedState), 
pd.to_datetime(state.private_sndStamp, unit='s'))\n if (MTM1M3.DetailedState(state.detailedState) == MTM1M3.DetailedState.PARKED\n or MTM1M3.DetailedState(state.detailedState) == MTM1M3.DetailedState.PARKEDENGINEERING):\n break\n\nasync def raiseM1M3(m1m3):\n m1m3.evt_detailedState.flush()\n await m1m3.cmd_raiseM1M3.set_start(raiseM1M3=True, timeout = 30)\n while True:\n state = await m1m3.evt_detailedState.next(flush=False, timeout=300)\n print('m1m3 state', MTM1M3.DetailedState(state.detailedState), pd.to_datetime(state.private_sndStamp, unit='s'))\n if (MTM1M3.DetailedState(state.detailedState) == MTM1M3.DetailedState.ACTIVE\n or MTM1M3.DetailedState(state.detailedState) == MTM1M3.DetailedState.ACTIVEENGINEERING):\n break\n\nasync def readyM1M3(m1m3):\n \n m1m3Angle = await m1m3.tel_inclinometerData.next(flush=True, timeout=5)\n print(\"m1m3 inclinometer Angle = \", m1m3Angle.inclinometerAngle)\n \n dstate = await m1m3.evt_detailedState.aget(timeout=200)\n if MTM1M3.DetailedState(dstate.detailedState) == MTM1M3.DetailedState.PARKED:\n await raiseM1M3(m1m3)\n else:\n print('M1M3 is raised')\n \n m1m3HP = await m1m3.tel_hardpointActuatorData.aget()\n print(\"m1m3 z position = \", m1m3HP.zPosition)\n \n m1m3ForceBalance = await m1m3.evt_appliedBalanceForces.aget(timeout=10.)\n if not m1m3ForceBalance.forceMagnitude:\n m1m3.evt_appliedBalanceForces.flush()\n await m1m3.cmd_enableHardpointCorrections.set_start(timeout=10)\n m1m3ForceBalance = await m1m3.evt_appliedBalanceForces.next(flush=False, timeout=10.)\n m1m3ForceBalance = await m1m3.evt_appliedBalanceForces.aget(timeout=10.)\n print(\"Magnitude of the m1m3 force balance system\", m1m3ForceBalance.forceMagnitude)\n \n ### no activeopticForces (or any other hunman-applied forces) to start with\n fz = [0]*156\n await m1m3.cmd_applyAberrationForces.set_start(zForces=fz)\n await m1m3.cmd_applyActiveOpticForces.set_start(zForces=fz)\n \nasync def plotM1M3Forces(m1m3):\n \n fel = await m1m3.evt_appliedElevationForces.aget(timeout=10.)\n fba = await m1m3.evt_appliedBalanceForces.aget(timeout=10.)\n fst = await m1m3.evt_appliedStaticForces.aget(timeout=10.)\n fao = await m1m3.evt_appliedActiveOpticForces.aget(timeout=10.)\n \n ftel = await m1m3.tel_forceActuatorData.aget(timeout=10.)\n \n fig, ax = plt.subplots(3,1, figsize=(15,8))\n ax[0].plot(fel.xForces, '-o', label='elevation');\n ax[0].plot(fba.xForces, label='FB')\n ax[0].plot(fst.xForces, label='static')\n ax[0].plot(ftel.xForce, '-v', label='measured')\n ax[0].legend()\n ax[0].set_title('XForces')\n ax[1].plot(fel.yForces, '-o', label='elevation');\n #ax[1].plot(fba.yForces, label='FB')\n #ax[1].plot(fst.yForces, label='static')\n ax[1].plot(ftel.yForce, '-v', label='measured')\n ax[1].legend()\n ax[1].set_title('YForces')\n ax[2].plot(fel.zForces, '-o', label='elevation');\n ax[2].plot(fba.zForces, label='FB')\n ax[2].plot(fst.zForces, label='static')\n ax[2].plot(fao.zForces, label='AOS')\n ax[2].plot(ftel.zForce, '-v', label='measured')\n ax[2].set_title('ZForces')\n ax[2].legend()\n \n fig2, ax=plt.subplots( 1,3, figsize = [15,4])\n aa = np.array(fao.zForces)\n img = ax[0].scatter(m1m3_xact, m1m3_yact, c=aa, s=abs(aa)*2)\n #plt.jet()\n ax[0].axis('equal')\n ax[0].set_title('AOS forces')\n fig.colorbar(img, ax=ax[0])\n\n aa = np.array(fel.zForces)\n img = ax[1].scatter(m1m3_xact, m1m3_yact, c=aa, s=abs(aa)*0.1)\n #plt.jet()\n ax[1].axis('equal')\n ax[1].set_title('elevation forces')\n fig.colorbar(img, ax=ax[1])\n \n aa = np.array(fst.zForces)\n img = ax[2].scatter(m1m3_xact, 
m1m3_yact, c=aa, s=abs(aa)*10)\n #plt.jet()\n ax[2].axis('equal')\n ax[2].set_title('static forces')\n fig.colorbar(img, ax=ax[2])\n \nasync def plotM2Forces(m2):\n \n axialForces = await m2.tel_axialForce.aget(timeout=2)\n tangentForces = await m2.tel_tangentForce.aget(timeout=2)\n\n fig, ax = plt.subplots(2,1, figsize=(15,8))\n ax[0].plot(axialForces.measured, label='measured');\n ax[0].plot(axialForces.applied, label='applied');\n ax[0].plot(axialForces.hardpointCorrection,'.', label='FB');\n ax[0].plot(axialForces.lutGravity, label='LUT G');\n ax[0].legend()\n ax[1].plot(tangentForces.measured, label='measured');\n ax[1].plot(tangentForces.applied, label='applied');\n ax[1].plot(tangentForces.hardpointCorrection, 'o', label='FB');\n ax[1].plot(tangentForces.lutGravity, label='LUT G');\n ax[1].legend()\n\n fig2, ax=plt.subplots( 1,2, figsize = [10,4])\n aa = np.array(axialForces.measured)\n img = ax[0].scatter(m2_xact, m2_yact, c=aa, s=abs(aa)*2)\n #plt.jet()\n ax[0].axis('equal')\n ax[0].set_title('measured forces')\n fig.colorbar(img, ax=ax[0])\n\n aa = np.array(axialForces.applied)\n img = ax[1].scatter(m2_xact, m2_yact, c=aa, s=abs(aa)*2)\n #plt.jet()\n ax[1].axis('equal')\n ax[1].set_title('applied forces')\n fig.colorbar(img, ax=ax[1]) \n \nasync def readyM2(m2):\n \n zAngle = await m2.tel_zenithAngle.next(flush=True, timeout=5)\n print('m2 inclinometer angle = ', zAngle.measured)\n \n m2ForceBalance = await m2.evt_forceBalanceSystemStatus.aget(timeout=10.)\n if not m2ForceBalance.status:\n await m2.cmd_switchForceBalanceSystem.set_start(status=True, timeout=10)\n m2ForceBalance = await m2.evt_forceBalanceSystemStatus.aget(timeout=10.)\n print(\"Status of the M2 force balance system\", m2ForceBalance.status)\n\n print('clear any M2 activeopticForces (or any other hunman-applied forces)')\n await m2.cmd_resetForceOffsets.set_start()\n \nasync def moveHexaTo0(hexa):\n ### command it to collimated position (based on LUT)\n \n need_to_move = False\n try:\n posU = await hexa.evt_uncompensatedPosition.aget(timeout=10.)\n if abs(max([getattr(posU, i) for i in 'xyzuvw']))<1e-8:\n print('hexapod already at LUT position')\n else:\n need_to_move = True\n except TimeoutError:\n need_to_move = True\n if need_to_move:\n hexa.evt_inPosition.flush()\n #according to XML, units are micron and degree\n await hexa.cmd_move.set_start(x=0,y=0,z=0, u=0,v=0,w=0,sync=True)\n while True:\n state = await hexa.evt_inPosition.next(flush=False, timeout=10)\n print(\"hexa in position?\",state.inPosition, pd.to_datetime(state.private_sndStamp, unit='s'))\n if state.inPosition:\n break\n await printHexaPosition(hexa)\n \nasync def printHexaPosition(hexa):\n pos = await hexa.tel_application.next(flush=True, timeout=10.)\n print(\"Current Hexapod position\")\n print(\" \".join(f\"{p:10.2f}\" for p in pos.position[:3]), end = ' ') \n print(\" \".join(f\"{p:10.6f}\" for p in pos.position[3:]) )\n \nasync def printHexaUncompensatedAndCompensated(hexa):\n posU = await hexa.evt_uncompensatedPosition.aget(timeout=10.)\n print('Uncompensated position')\n print(\" \".join(f\"{p:10.2f}\" for p in [getattr(posU, i) for i in 'xyz']), end = ' ')\n print(\" \".join(f\"{p:10.6f}\" for p in [getattr(posU, i) for i in 'uvw']),' ',\n pd.to_datetime(posU.private_sndStamp, unit='s')) \n posC = await hexa.evt_compensatedPosition.aget(timeout=10.)\n print('Compensated position')\n print(\" \".join(f\"{p:10.2f}\" for p in [getattr(posC, i) for i in 'xyz']), end = ' ')\n print(\" \".join(f\"{p:10.6f}\" for p in [getattr(posC, i) for i in 
'uvw']),' ',\n pd.to_datetime(posC.private_sndStamp, unit='s'))\n\nasync def readyHexaForAOS(hexa):\n settings = await hexa.evt_settingsApplied.aget(timeout = 10.)\n hasSettings = 0\n if hasattr(settings, 'settingsVersion'):\n print('settingsVersion = ', settings.settingsVersion, pd.to_datetime(settings.private_sndStamp, unit='s'))\n hasSettings = 1\n if (not hasSettings) or (not settings.settingsVersion[:12] == 'default.yaml'):\n print('YOU NEED TO SEND THIS HEXAPOD TO STANDBY, THEN LOAD THE PROPER CONFIG')\n else:\n hexaConfig = await hexa.evt_configuration.aget(timeout=10.)\n print(\"pivot at (%.0f, %.0f, %.0f) microns \"%(hexaConfig.pivotX, hexaConfig.pivotY, hexaConfig.pivotZ))\n print(\"maxXY = \", hexaConfig.maxXY, \"microns, maxZ= \", hexaConfig.maxZ, \" microns\")\n print(\"maxUV = \", hexaConfig.maxUV, \"deg, maxW= \", hexaConfig.maxW, \" deg\")\n\n lutMode = await hexa.evt_compensationMode.aget(timeout=10)\n if not lutMode.enabled:\n hexa.evt_compensationMode.flush()\n await hexa.cmd_setCompensationMode.set_start(enable=1, timeout=10)\n lutMode = await hexa.evt_compensationMode.next(flush=False, timeout=10)\n print(\"compsensation mode enabled?\",lutMode.enabled, pd.to_datetime(lutMode.private_sndStamp, unit='s'))\n await moveHexaTo0(hexa)\n await printHexaUncompensatedAndCompensated(hexa)\n print(\"Does the hexapod has enough inputs to do LUT compensation? (If the below times out, we do not.)\")\n #Note: the target events are what the hexa CSC checks; if one is missing, the entire LUT will not be applied\n #it also needs to see an uncompensatedPosition (a move would trigger that) in order to move to the compensatedPosition\n a = await hexa.evt_compensationOffset.aget(timeout=10.)\n print('mount elevation = ', a.elevation)\n print('mount azimth = ', a.azimuth)\n print('rotator angle = ', a.rotation)\n print('? 
temperature = ', a.temperature)\n print('x,y,z,u,v,w = ', a.x, a.y, a.z, a.u, a.v, a.w)\n\n\nasync def ofcSentApplied(aos, m1m3, m2, camhex, m2hex, make_plot=False):\n dof = await aos.evt_degreeOfFreedom.aget(timeout = 5.)\n m1m3C = await aos.evt_m1m3Correction.aget(timeout = 5.)\n m2C = await aos.evt_m2Correction.aget(timeout = 5.)\n camhexC = await aos.evt_cameraHexapodCorrection.aget(timeout = 5.)\n m2hexC = await aos.evt_m2HexapodCorrection.aget(timeout = 5.)\n \n aggregated_dof = np.array(dof.aggregatedDoF)\n visit_dof = np.array(dof.visitDoF)\n m1m3C = m1m3C.zForces\n m2C = m2C.zForces\n camhexC = np.array([getattr(camhexC,i) for i in ['x', 'y', 'z', 'u','v','w']])\n m2hexC = np.array([getattr(m2hexC,i) for i in ['x', 'y', 'z', 'u','v','w']])\n \n print('DOF event time = ', pd.to_datetime(dof.private_sndStamp, unit='s'))\n \n m1m3F = await m1m3.evt_appliedActiveOpticForces.aget(timeout = 5.)\n m2F = await m2.tel_axialForce.next(flush=True, timeout=5.)\n camhexP = await camhex.evt_uncompensatedPosition.aget(timeout = 5.)\n m2hexP = await m2hex.evt_uncompensatedPosition.aget(timeout = 5.)\n \n m1m3F = m1m3F.zForces\n m2F = m2F.applied\n camhexP = np.array([getattr(camhexP,i) for i in ['x','y','z', 'u','v','w']]) \n m2hexP = np.array([getattr(m2hexP,i) for i in ['x','y','z', 'u','v','w']])\n \n if make_plot:\n fig, ax = plt.subplots(2,3, figsize=(19,8) )\n ##--------------------------------------\n ax[0][0].plot(aggregated_dof[:10],'-bo', label='aggregatedDoF')\n ax[0][0].plot(visit_dof[:10],'-rx', label='visitDoF')\n ax[0][0].set_title('hexapod DoF')\n ax[0][0].legend()\n\n ax[0][1].plot(aggregated_dof[10:], '-bo', label='aggregatedDoF')\n ax[0][1].plot(visit_dof[10:],'-rx', label='visitDoF')\n ax[0][1].set_title('Mirrors DoF')\n ax[0][1].legend()\n\n ##--------------------------------------\n ax[0][2].plot(m1m3C,'-o', label='forces sent')\n ax[0][2].plot(m1m3F, '-rx', label='forces applied')\n ax[0][2].set_title('M1M3 Forces')\n ax[0][2].legend()\n\n ax[1][0].plot(m2C,'-o', label='forces sent')\n ax[1][0].plot(m2F, '-x', label='forces applied')\n ax[1][0].set_title('M2 Forces')\n ax[1][0].legend()\n\n ##--------------------------------------\n ax[1][1].plot(camhexC[:3], '-ro', label='cam hex xyz Sent', markersize=8)\n ax[1][1].plot(m2hexC[:3],'-bx', label='m2 hex xyz Sent')\n ax[1][1].plot(camhexP[:3], '-o', label='cam hex xyz Applied')\n ax[1][1].plot(m2hexP[:3], '-v', label='m2 hex xyz Applied')\n ax[1][1].set_title('Hex xyz')\n ax[1][1].legend()\n\n ax[1][2].plot(camhexC[3:], '-ro', label='cam hex uvw Sent')\n ax[1][2].plot(m2hexC[3:], '-bx', label='m2 hex uvw Sent')\n ax[1][2].plot(camhexP[3:], '-o', label='cam hex uvw Applied')\n ax[1][2].plot(m2hexP[3:], '-v', label='m2 hex uvw Applied')\n ax[1][2].set_title('M2 Hex xyzuvw')\n ax[1][2].legend()\n \n ofc_dict = {}\n ofc_dict['aggregated_dof'] = aggregated_dof\n ofc_dict['visit_dof'] = visit_dof\n ofc_dict['m1m3C'] = m1m3C\n ofc_dict['m2C'] = m2C\n ofc_dict['camhexC'] = camhexC\n ofc_dict['m2hexC'] = m2hexC\n ofc_dict['m1m3F'] = m1m3F\n ofc_dict['m2F'] = m2F\n ofc_dict['camhexP'] = camhexP\n ofc_dict['m2hexP'] = m2hexP\n \n print('If corrections have been issued, we should always expect sent (xxC) to match applied (xxF & xxP)')\n return ofc_dict\n \nasync def moveMountConstantV(mount, startAngle, stopAngle):\n #change the elevation angle step by step\n\n freq = 1 #Hz\n vAngle = 2 #1 deg change per minute\n holdMinutes = 0.1 #how long to hold at integeter values of the elevation angle\n angleStepSize = 1 #each time we change by 1 
deg, before we hold in place\n\n rampMinutes = angleStepSize/vAngle\n print('This will run for %.0f minutes'%((startAngle - stopAngle)*(rampMinutes+holdMinutes)))\n start_time = Time(datetime.now())\n startTime = time.time()\n end_time = start_time + timedelta(minutes=80)\n demandAngle = startAngle\n while demandAngle > stopAngle-0.01:\n await asyncio.sleep(1.0/freq)\n\n timeNow = time.time()\n minutesEllapsed = (timeNow - startTime)/60\n cyclePassed = np.floor(minutesEllapsed/(rampMinutes+holdMinutes))\n minutesIntoThisCycle = min(rampMinutes, minutesEllapsed - cyclePassed*(rampMinutes+holdMinutes))\n demandAngle = startAngle - cyclePassed*angleStepSize - minutesIntoThisCycle * vAngle\n #print(demandAngle, cyclePassed, minutesIntoThisCycle)\n await mount.cmd_moveToTarget.set_start(azimuth=0, elevation=demandAngle)\n \n ","sub_path":"bxin/aos2comp/aosTools.py","file_name":"aosTools.py","file_ext":"py","file_size_in_byte":17936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"342434525","text":"import datetime\nimport time\nimport csv\nimport os\nimport ast\nimport glob\nfrom math import log\nfrom sense_hat import SenseHat\nfrom weather import get_timestamp\nfrom sendText import *\n\n\ndef get_csv_data():\n \"\"\"Open the daily csv log and return the content\"\"\"\n global csv_path\n csv_list = []\n day = get_timestamp().split()[0]\n csv_path = os.path.join(os.path.dirname(os.path.abspath(__file__)) + '/logs/', day + '.csv')\n # csv_path = '/home/pi/Pi_Weather_Station/src/logs/' + day + '.csv'\n with open(csv_path, 'r') as csv_file:\n # content = f.read()\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n # print(row)\n csv_list.append(row)\n return csv_list\n\nget_csv_data()\n\ndef get_dark_sky():\n \"\"\"Read the most recent dark sky log and return a list of the stats\"\"\"\n csv_content = get_csv_data()\n most_recent = csv_content[-1]\n dark_sky_string = most_recent[9]\n dark_sky_list = dark_sky_string.strip('][').split(', ')\n ds_temp = dark_sky_list[0]\n ds_cond = dark_sky_list[1].strip(\"'\")\n ds_fore = dark_sky_list[2].strip(\"'\")\n return [ds_temp, ds_cond, ds_fore]\n\n# print(os.path.basename(csv_path))\n# print(csv_path[:-3] + 'alert')\n\n\n\ndef check_min():\n try:\n # alert_cont = read_alert()\n minimum_temp = 74\n current_temp = get_dark_sky()[0]\n current_temp = float(current_temp) \n if current_temp <= minimum_temp:\n print('It is 74 degrees or cooler! Time to open the windows')\n return True\n else:\n print('Temperature is within limit set')\n return False\n except:\n print('That did not work.')\n print('probably did not have a value set for minimum temp')\n\nalert_file_path = csv_path[:-3] + 'alert'\n\nif os.path.exists(alert_file_path) == False:\n print('no alert file detected')\n if check_min() == True:\n print(\"Temp reached! creating alert flag\")\n alert_flag = open(alert_file_path, 'w+')\n print(\"Sending Text\")\n send_email('It is 74 degrees or cooler! Time to open the windows')\n","sub_path":"src/window_alerts.py","file_name":"window_alerts.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"586192382","text":"n, k = [int(i) for i in input().split()]\n# 何種類あるかわからないので寿司の数だけtopを用意する\ntop = [0] * n\nsub = []\n\nfor i in range(n):\n t, d = [int(i) for i in input().split()]\n t -= 1\n\n # 各ネタの暫定topのみの配列がtop\n if top[t] == 0:\n top[t] = d\n else:\n if top[t] >= d:\n # ちっさい場合はtop以外の2軍=subに追加\n sub.append(d)\n else:\n # 大きい場合は元々の暫定topをsubに追加し、暫定topを変更\n sub.append(top[t])\n top[t] = d\n\n# topを大きい順にそーと\ntop.sort()\ntop = top[::-1]\nres = [0]\n\n# 寿司の数でループ\nfor i in range(n):\n if top[i] != 0:\n # 前回のtopの値(res[-1])+今回のtopのあたいを足す\n # resの例は[0, 10(最大), 17(10+7), ..., -1, -1, -1]\n res.append(res[-1] + top[i])\n else:\n res.append(-1)\n\n# subを大きい順にそーと\nsub.sort()\nsub = sub[::-1]\nw = [0]\n\n# subの分だけループ\nfor i in range(len(sub)):\n # 前回のsubの値(w[-1])+今回のsubのあたいを足す\n # subの例は[0, 9(最大), 17(9+8), ..., 1, 1]\n w.append(w[-1] + sub[i])\n\ntmp = 0\n\n# 寿司ネタの種類でループ\nfor i in range(1, k+1):\n # subから生成したwの配列のながさ が \n # 選ぶ寿司の数(k)-選ぶ寿司の種類(topから選択される数i)より大きければ\n # 例えばsubの数が2個、選ぶ数が4個、選ぶ種類が1の場合、選択不可能\n # topの数1 + subの数2 が選ぶ数より少ないため\n # かつtopの値が-1でなければ(これはループで指定しているため不要か) \n\n if len(w)-1 + i >= k and res[i] != -1:\n # 従来の最高とどっちが大きいか比較する\n # res[i]はi種類の時それぞれの種類のtopを足したもの\n # w[k-1]はsubを大きい順に並べた時の1~k-i個までの和(topとsub足してk個になる)\n \n # 難しいが寿司1のtopが美味しさ10、subなし、寿司2のtop6,sub5だったとして\n # 1種類のループに寿司2のsubが含まれる\n # しかしこれは明らかに最高値にはならない\n # 後に選択されるtopから2種類選んだほうが良いからである\n # つまりこの問題では、4種類が最高だった場合、subの上からk-i番目まで\n # 確実に4種類で構成されることになる\n\n # 逆に考えると、美味しさが小さいtopを含めば種類ポイントによって\n # 点数が上がる可能性があるというように考える\n # つまりtop以外は全て最善、最高で良い\n\n # 公式の解説とも繋がる部分がある\n # 公式では単純な最高値を求め、そこから入れ替える方式を取っているが\n # 組み入れはiのループ、組み出しはw[-1]を省くことでできているという訳である\n tmp = max(res[i] + w[k-i] + i*i ,tmp)\n print(tmp)\n else:\n print(tmp)\n\nprint(tmp)\n","sub_path":"ABC/ABC110-/ABC116/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"198639266","text":"import acm\nimport csv\nimport os\n\nfrom datetime import datetime\nfrom at_logging import getLogger\nfrom at_ael_variables import AelVariableHandler\n\nLOGGER = getLogger(__name__)\nFOOTER = ['*0RCM-END00000001']\nVAL_FOOTER = ['*6RCM-END00000001']\n\nVAL_HEADER = ['*Comment', 'Version', 'Message Type', 'Action', 'USI Prefix', 'USI Value', 'Primary Asset Class',\n 'Secondary Asset Class', 'Trade Party 1 Prefix', 'Trade Party 1 Value',\n 'Trade Party 2 Prefix', 'Trade Party 2 Value', 'Data Submitter Prefix', 'Data Submitter Value',\n 'Submitted For Prefix', 'Submitted For Value', 'Cleared Product ID',\n 'Valuation Datetime', 'MTM Value', 'MTM Currency', 'Valuation Source', 'Valuation Reference Model',\n 'Additional Comments', 'Data Submitter Message Id', 'Valuation Datetime Party 2',\n 'MTM Value Party 2', 'MTM Currency Party 2', 'Valuation Type Party 1', 'Valuation Type Party 2',\n 'UTI Prefix', 'UTI', 'MTM Value CCP', 'MTM Currency CCP', 'Valuation Type CCP',\n 'Valuation Datetime CCP', 'Trade Party 1 Transaction Id', 'Trade Party 2 Transaction Id', 'sendTo',\n 'Trade Party 1 - Reporting Destination', 'Party 2 Reporting Obligation',\n 'Trade Party 1 -Execution Agent ID Type', 'Trade Party 1 - Execution Agent ID',\n 'Trade Party 2 -Execution Agent ID Type', 'Trade Party 2 - Execution Agent ID', 'AI Party 1 - Type',\n 'AI Party 1 - ID', 'AI Party 2 - Type', 'AI Party 2 - ID', 'Clearing Status',\n 'Trade Party 1 - Action Type', 'Trade Party 2 - Action Type', 'Level',\n 'Trade Party 1 - Third Party Viewer ID Type',\n 'Trade Party 1 - Third Party Viewer ID', 'Trade Party 2 - Third Party Viewer ID Type',\n 'Trade Party 2 - Third Party Viewer ID', 'Reporting Timestamp']\nCOL_HEADER = ['*Comment', 'Version', 'Message Type', 'Data Submitter Message ID', 'Action', 'Data Submitter prefix',\n 'Data Submitter value', 'Trade Party Prefix', 'Trade Party Value',\n 'Execution Agent Party Value Prefix', 'Execution Agent Party Value', 'Collateral Portfolio Code',\n 'Collateral Portfolio Indicator', 'Value of the collateral', 'Currency of the collateral',\n 'Collateral Valuation Date Time', 'Collateral Reporting Date', 'sendTo', 'Execution Agent Masking Flag',\n 'Trade Party - Reporting Obligation', 'Other Party ID Type', 'Other Party ID',\n 'Collateralized', 'Initial Margin Posted', 'Currency of the Initial Margin Posted',\n 'Initial Margin Received', 'Currency of the Initial Margin Received', 'Variation Margin Posted',\n 'Currency of the Variation Margin Posted', 'Variation Margin Received',\n 'Currency of the Variation Margin Received', 'Excess Collateral Posted',\n 'Currency of the Excess Collateral Posted',\n 'Excess Collateral Received', 'Currency of the Excess Collateral received', 'Third Party Viewer',\n 'Reserved - Participant Use 1', 'Reserved - Participant Use 2', 'Reserved - Participant Use 3',\n 'Reserved - Participant Use 4', 'Reserved - Participant Use 5', 'Action Type Party 1',\n 'Third Party Viewer ID Type', 'Level', 'Reporting Timestamp']\nNEW_HEADER = ['*Comment', 'Version', 'Message Type', 'Data Submitter Message ID', 'Action', 'Data Submitter prefix',\n 'Data Submitter value', 'Trade Party Prefix', 'Trade Party Value',\n 'Execution Agent Party Prefix', 'Execution Agent Party Value', 'UTI Prefix', 'UTI Value', 'USI Prefix',\n 'USI Value', 'Trade Party Transaction Id', 'Collateral Portfolio code', 'Collateralized',\n 'sendTo', 'Trade Party - Reporting Obligation', 'Other Party ID Type', 'Other Party ID',\n 'Action Type Party 1', 'Third Party Viewer ', 
'Collateral Portfolio Indicator', 'Initial Margin Posted',\n 'Currency of the Initial Margin Posted', 'Initial Margin Received',\n 'Currency of the Initial Margin Received', 'Variation Margin Posted',\n 'Currency of the Variation Margin Posted',\n 'Variation Margin Received', 'Currency of the Variation Margin Received', 'Excess Collateral Posted',\n 'Currency of the Excess Collateral Posted', 'Excess Collateral Received',\n 'Currency of the Excess Collateral received', 'Reserved - Participant Use 1',\n 'Reserved - Participant Use 2', 'Reserved - Participant Use 3', 'Reserved - Participant Use 4',\n 'Reserved - Participant Use 5', 'Level', 'Third Party Viewer ID Type', 'Execution Agent Masking Flag']\n\nael_variables = AelVariableHandler()\nael_variables.add('output_folder',\n label='Output Folder',\n mandatory=True,\n default='Y:\\Jhb\\FAReports\\AtlasEndOfDay\\Delegated_Reporting')\n\nael_variables.add('input_folder',\n label='Input Folder',\n mandatory=True,\n default='Y:\\Jhb\\IT_Pricing_Risk\\Data\\Prod\\MarketRisk\\Apex')\n\nael_variables.add('collateral_position_file',\n label='Collateral Position File',\n mandatory=True,\n default='CollateralBalancePositions.csv')\n\nael_variables.add('collateral_portfolios',\n label='Counterparty Collateral Choice List',\n cls=acm.FChoiceList,\n collection=sorted(acm.FChoiceList.Choices()),\n default=acm.FChoiceList['EMIR Collateral Portfolio List'])\n\nael_variables.add('delegated_reporting_new_deals',\n label='Delegated Reporting New Deals',\n cls=acm.FStoredASQLQuery,\n collection=sorted(acm.FStoredASQLQuery.Select(\"subType='FTrade'\")),\n default=acm.FStoredASQLQuery['DelegatedReportingNewDeals'])\n\nael_variables.add('delegated_reporting_valuation',\n label='Delegated Reporting Valuation Deals',\n cls=acm.FStoredASQLQuery,\n collection=sorted(acm.FStoredASQLQuery.Select(\"subType='FTrade'\")),\n default=acm.FStoredASQLQuery['DelegateReporting_All'])\n\nael_variables.add('data_submitter_value',\n label='Data Submitter Value',\n mandatory=True,\n default='SLI1CVYMJ21DST0Q8K25')\n\nael_variables.add('trade_prefix',\n label='Trade Party Prefix',\n mandatory=True,\n default='LEI')\n\n\ndef validate(date_text):\n try:\n if datetime.strptime(date_text, '%Y-%m-%d'):\n return True\n except ValueError:\n return False\n\n\ndef return_latest_date_folder(path):\n return sorted([name for name in os.listdir(path) if validate(name) and os.path.isdir('{}/{}'.format(path, name))],\n reverse=True)[0]\n\n\ndef create_output_location(output_file):\n try:\n if not os.path.exists(output_file):\n os.makedirs(output_file)\n return True\n except ValueError:\n return False\n\n\ndef preprocess_collateral_data(collateral_input_file):\n \"\"\"\n This will compile all collateral data needed\n \"\"\"\n collateral_data = {}\n with open(collateral_input_file) as data:\n results_reader = csv.reader(data, delimiter=',')\n for row in results_reader:\n agreement_id = row[0]\n collateral_balance = row[24]\n collateral_currency = row[25]\n collateral_data[agreement_id] = {'CollateralBalance': collateral_balance,\n 'CollateralCurrency': collateral_currency}\n return collateral_data\n\n\ndef write_csv_file(output_location, result_list, header_list, footer):\n \"\"\"\n Create a file to store all results\n \"\"\"\n with open(output_location, 'wb') as recon_file:\n recon_writer = csv.writer(recon_file)\n recon_writer.writerow(header_list)\n for item in result_list:\n recon_writer.writerow(item)\n recon_writer.writerow(footer)\n\n\ndef new_deals_event(new_trades_list, data_submitter, 
trade_prefix):\n    \"\"\"\n    This creates csv data for new deals events\n    \"\"\"\n    LOGGER.info(\"Processing new trades event\")\n\n    new_deals_data = []\n    for trade in new_trades_list:\n        uti = 'MARKITWIRE{0}'.format(trade.add_info('CCPmiddleware_id'))\n        counterparty_lei = trade.Counterparty().LegalEntityId()\n        new_deals_data.append(\n            ['Trade', 'Coll1.0', 'CollateralLink', '', 'NEW', trade_prefix, data_submitter,\n             trade_prefix, counterparty_lei, '', '', '0000452A', uti, '', '', '',\n             '1001', 'Partial', '', 'ESMA', trade_prefix, data_submitter, 'V', '', 'Y', '', '', '', '',\n             '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''])\n    return new_deals_data\n\n\ndef collateral_valuation_event(collateral_dictionary, collateral_choice_list, data_submitter, trade_prefix):\n    \"\"\"\n    This will create csv data for valuation on collateral\n    \"\"\"\n    LOGGER.info(\"Processing collateral valuation event\")\n    col_data = []\n    for choice in collateral_choice_list.Choices():\n        portfolio = choice.Name()\n        currency = collateral_dictionary[portfolio]['CollateralCurrency']\n        balance = collateral_dictionary[portfolio]['CollateralBalance']\n        valuation_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')\n        if acm.FCounterParty[choice.Description()]:\n            counterparty_lei = acm.FCounterParty[choice.Description()].LegalEntityId()\n        elif acm.FClient[choice.Description()]:\n            counterparty_lei = acm.FClient[choice.Description()].LegalEntityId()\n        else:\n            counterparty_lei = \"\"\n        col_data.append(\n            ['ABSA Collateral', 'Coll1.0', 'CollateralValue', '', 'New', trade_prefix, data_submitter,\n             trade_prefix, counterparty_lei, '', '', '1001', 'Y', '', '', valuation_time, '', '', '',\n             'ESMA', trade_prefix, data_submitter, 'Partially', '', '', '', '', '0', currency,\n             balance, currency, '0', currency, '0', currency, '', '', '', '', '', '', 'V', '', '',\n             valuation_time])\n    return col_data\n\n\ndef valuation_event(trade_list, data_submitter, trade_prefix):\n    \"\"\"\n    This will create csv data for valuation event\n    \"\"\"\n    LOGGER.info(\"Processing valuation event\")\n    std_calculation_space_collection = acm.Calculations().CreateStandardCalculationsSpaceCollection()\n    val_data = []\n    for trade in trade_list:\n        counterparty_name = trade.Counterparty().Name()\n        trd_nbr = trade.Name()\n        uti = 'MARKITWIRE{0}'.format(trade.add_info('CCPmiddleware_id'))\n        mtm_value = trade.Calculation().MarkToMarketValue(std_calculation_space_collection).Number()\n        valuation_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')\n        counterparty_lei = trade.Counterparty().LegalEntityId()\n        currency = trade.Currency().Name()\n        val_data.append(\n            [trd_nbr, '', 'Valuation', 'New', '', '', 'InterestRate', '', trade_prefix, counterparty_lei,\n             trade_prefix, data_submitter, trade_prefix, data_submitter,\n             trade_prefix, counterparty_lei, '', valuation_time, mtm_value, currency, '', '', '',\n             counterparty_name, '', '', '', 'MarkToMarket', '', '0000452A', uti, '', '', '', '', trd_nbr, 'EXTERNAL',\n             '', 'ESMA', '', '', '', '', '', '', '', '', '', 'FALSE', 'V', '', 'T', '', '', '', '',\n             valuation_time])\n    return val_data\n\n\ndef ael_main(ael_dict):\n    LOGGER.info(\"Start processing EMIR reporting data\")\n    today = datetime.now().strftime('%Y-%m-%d')\n    folder = \"{0}/{1}\".format(ael_dict['output_folder'], today)\n    create_output_location(folder)\n    new_trades = ael_dict['delegated_reporting_new_deals'].Query().Select()\n    if len(new_trades) > 0:\n        # note: the event builders take the submitter value first, then the prefix\n        new_deals = new_deals_event(new_trades, str(ael_dict['data_submitter_value']), str(ael_dict['trade_prefix']))\n        new_deals_file = \"{0}/New Deals - Reported_{1}.csv\".format(folder, today)\n        LOGGER.info(\"Writing new deals event data to {}\".format(new_deals_file))\n        write_csv_file(new_deals_file, new_deals, NEW_HEADER, FOOTER)\n    else:\n        LOGGER.info(\"No new trades\")\n\n    collateral_input_file = r'{0}/{1}/{2}'.format(ael_dict['input_folder'],\n                                                  return_latest_date_folder(ael_dict['input_folder']),\n                                                  ael_dict['collateral_position_file'])\n    collateral_data = preprocess_collateral_data(collateral_input_file)\n    col_data = collateral_valuation_event(collateral_data, ael_dict['collateral_portfolios'],\n                                          str(ael_dict['data_submitter_value']), str(ael_dict['trade_prefix']))\n    collateral_file = \"{0}/Collateral_Reported_{1}.csv\".format(folder, today)\n    LOGGER.info(\"Writing collateral valuation event data to {}\".format(collateral_file))\n    write_csv_file(collateral_file, col_data, COL_HEADER, FOOTER)\n\n    val_data = valuation_event(ael_dict['delegated_reporting_valuation'].Query().Select(),\n                               str(ael_dict['data_submitter_value']), str(ael_dict['trade_prefix']))\n    valuation_file = \"{0}/Valuation_Reported_{1}.csv\".format(folder, today)\n    LOGGER.info(\"Writing valuation event data to {}\".format(valuation_file))\n    write_csv_file(valuation_file, val_data, VAL_HEADER, VAL_FOOTER)\n    LOGGER.info(\"Processing Done\")\n","sub_path":"Python modules/Delegated_Reporting.py","file_name":"Delegated_Reporting.py","file_ext":"py","file_size_in_byte":13769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
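+A way to make the swapped-argument bug fixed above impossible to reintroduce is to call the event builders with keyword arguments; a minimal sketch against the same functions:
+
+    new_deals = new_deals_event(
+        new_trades,
+        data_submitter=str(ael_dict['data_submitter_value']),
+        trade_prefix=str(ael_dict['trade_prefix']),
+    )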
+{"seq_id":"76779185","text":"\n\nfrom xai.brain.wordbase.adverbs._fain import _FAIN\n\n#calss header\nclass _FAINER(_FAIN, ):\n\tdef __init__(self,): \n\t\t_FAIN.__init__(self)\n\t\tself.name = \"FAINER\"\n\t\tself.specie = 'adverbs'\n\t\tself.basic = \"fain\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adverbs/_fainer.py","file_name":"_fainer.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"389877422","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}\n\nclass QqtvSpider(scrapy.Spider):\n name = 'qqtv'\n allowed_domains = ['v.qq.com']\n start_urls = ['http://v.qq.com/']\n\n def start_requests(self):\n raw_url = \"https://v.qq.com/x/bu/pagesheet/list?append=1&channel=movie&itype=100062&listpage=2&offset=%d&pagesize=%d\"\n\n pagesize = 30\n for offset in range(0, 500, pagesize):\n url = raw_url%(offset, pagesize)\n request = scrapy.Request(\n url=url,\n headers=headers,\n dont_filter=False,\n callback=self.parse_url,\n errback=self.parse_err\n )\n yield request\n\n def parse_url(self, response):\n urls = [x.extract() for x in response.xpath(\"//a/@href\")]\n for url in urls:\n request = scrapy.Request(\n url=url,\n headers=headers,\n dont_filter=False,\n callback=self.parse,\n errback=self.parse_err\n )\n yield request\n\n def parse_err(self, response):\n url = response.request.url\n item = {\n \"msg\": \"error\",\n \"info\": url\n }\n yield item\n\n def parse(self, response):\n title = response.xpath('//h1[@class=\"video_title _video_title\"]/text()')\n content = response.xpath('//p[@class=\"summary\"]/text()')\n\n title = list(filter(lambda x: x!=\"\", [x.extract().strip() for x in title]))\n content = list(filter(lambda x: x!=\"\",[x.extract().strip() for x in content]))\n\n content = \" \".join(content)\n\n result = {\n \"title\": \",\".join(title),\n \"content\": content\n }\n\n item = {\n \"msg\": \"success\",\n \"info\": result\n }\n yield item\n\n","sub_path":"example4/qqtvSpider/qqtvSpider/spiders/qqtv.py","file_name":"qqtv.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"488650647","text":"import requests\nfrom lxml import etree\n\ntry:\n url = 'https://www.baidu.com/'\n headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}\n response = requests.get(url=url,headers=headers)\n response.encoding = response.apparent_encoding\n html = etree.HTML(response.content,etree.HTMLParser())\n # html = etree.tostring(html,encoding='utf-8').decode('utf-8')\n print(html.xpath('//a[last()]/text()'))\n print(html.xpath('//a[last()]'))\n print(html.xpath('//span[contains(@class,\"bg\")]/input')[-1].get('value'))\n print(html.xpath('//span[contains(@class,\"bg\")]/input/@value'))\n print(html.xpath('//a[starts-with(@name,\"tj_tr\")]/text()'))\nexcept:\n print(\"爬取失败\")\n","sub_path":"知识点/7-XPATH/Test3.py","file_name":"Test3.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"603030115","text":"# -*- coding: utf-8 -*-\n# /***************************************************************************/\n# * __________________________________\n# * METIS CYBERSPACE TECHNOLOGY S.A.\n# * www.metis.tech\n# * __________________________________\n# * [2019] All Rights Reserved.\n# *\n# * NOTICE: All information contained herein is, and remains\n# * the property of Metis CyberSpace Technology and its suppliers,\n# * if any. The intellectual and technical concepts contained\n# * herein are proprietary to METIS CYBERSPACE TECHNOLOGY\n# * and its suppliers and may be covered by European and Foreign Patents,\n# * patents in process, and are protected by trade secret or copyright law.\n# * Dissemination of this information or reproduction of this material\n# * is strictly forbidden unless prior written permission is obtained\n# * from Metis Cyberspace Technology.\n#\n# /***************************************************************************/\n#\n# Created Date: Thursday April 4th 2019\n# Author: Vassilis Lemonidis\n\"\"\"Module containing useful classes and utilities for connected components analysis\n\"\"\"\nimport cv2\nimport numpy as np\nfrom metis_pylib import LOGGER\nfrom Model.BasicObjects import GrayscaleGraphic\nfrom ImageProcessing.CCAnalysis import ConnectedComponent\n\n\ndef get_ccs(img):\n \"\"\"Retrieve the connected component from the input binary image img\n\n :param img: the input image\n :type img: np.ndarray\n :return: the list of connected components, sorted primarily\n by minimum y and secondarily by minimum x\n :rtype: list(ConnectedComponent)\n \"\"\"\n\n _, labels, stats, centroids = cv2.connectedComponentsWithStats(\n img.astype(np.uint8))\n ccs = [ConnectedComponent(i, GrayscaleGraphic(labels), stats)\n for i in range(1, len(centroids))]\n ccs = sorted(ccs, key=lambda x: (x.y_min, x.x_min))\n return ccs\n\n\ndef get_contained_ccs(ccs):\n \"\"\"Get for each connected component the number of the connected components that are contained in\n it\n\n :param ccs: the connected components objects\n :type ccs: list(ConnectedComponent)\n :return: the corresponding list of the numbers of the contained ccs per cc\n :rtype: list\n \"\"\"\n\n accessed_cnt = 0\n b_cnts = []\n for i, cc1 in enumerate(ccs):\n if cc1.area <= 2:\n b_cnts.append(0)\n continue\n b_cnt = 0\n for j, cc2 in enumerate(ccs[accessed_cnt + 1:]):\n if cc2.area <= 2:\n continue\n if i == j - accessed_cnt - 1:\n continue\n if cc2.y_min > cc1.y_max or cc2.x_min > cc1.x_max:\n if j == 0:\n accessed_cnt = i\n break\n if cc2.x_max < cc1.x_min or cc2.y_max < cc1.y_min:\n continue\n b_cnt += 1\n b_cnts.append(b_cnt)\n return b_cnts\n\n\ndef get_ccs_patch(img, ccs):\n \"\"\"Given an input image and a list of connected components, return a corresponding mask\n with ones the pixels of each connected component, useful for placing the cc_ patches inside\n the input image coordinates\n\n :param img: the input image\n :type img: np.ndarray\n :param ccs: the list of connected components\n :type ccs: list(ConnectedComponent)\n :return: the resulting ccs mask\n :rtype: np.ndarray\n \"\"\"\n\n if not ccs:\n return img\n patch = GrayscaleGraphic(np.zeros_like(img).astype(int))\n for cc_ in ccs:\n patch[cc_.indices] += cc_.patch.astype(int)\n return patch > 0\n\n\ndef get_shapes_surrounding_contours(\n img=None, contours=None, hierarchy=None):\n '''\n For each connected component of the given input, compute the external and internal contours\n of each shape.\n\n If given image, then use it to get the 
surrounding shapes.\n If given contours and hierarchy produced by cv2.findContours method with\n flag cv2.RETR_CCOMP, then use them instead.\n :param img: the binary image to be used. defaults to None\n :type img: None|np.ndarray\n :param contours: the list of the contours to be used, defaults to None\n :type contours: None|list\n :param hierarchy: the list of the found hierarchy, defualts to None\n :type hierarchy: None|list\n :return: a list of tuples (external_contour, [internal_contours])\n :rtype: list(tuple)\n '''\n assert img is not None or (\n contours is not None and hierarchy is not None), \"Incorrect call\"\n if img is None:\n if contours is None or hierarchy is None:\n raise BaseException(\n \"No image is given.\"\n \" The pair contours-hierarchy must be given\")\n else:\n _, contours, hierarchy = cv2.findContours(\n img.astype(np.uint8),\n cv2.RETR_CCOMP,\n cv2.CHAIN_APPROX_SIMPLE)\n\n def _get_contour_inner_point(contour):\n moments = cv2.moments(contour)\n if cv2.isContourConvex(contour):\n return [int(moments['m10'] / moments['m00']),\n int(moments['m01'] / moments['m00'])]\n else:\n possible_translations = np.array([[0, 1], [1, 0], [0, -1], [-1, 0],\n [1, 1], [-1, -1], [1, -1], [-1, 1]])\n for translation in possible_translations:\n if cv2.pointPolygonTest(\n contour,\n tuple(\n (contour[0][0] + translation).tolist()),\n False) == 1:\n return contour[0][0] + translation\n allowed_nodes = []\n assessed_node = [False] * len(contours)\n shapes_contours_inds = {}\n level = 0\n hierarchy = hierarchy[0]\n while not all(assessed_node):\n for ind, (contour, h_level) in enumerate(zip(contours, hierarchy)):\n if assessed_node[ind]:\n continue\n if not level:\n if h_level[2] == -1:\n assessed_node[ind] = True\n parent_ind = h_level[3]\n if parent_ind == -1:\n # is external filled shape\n if ind not in shapes_contours_inds:\n shapes_contours_inds[ind] = []\n continue\n\n grandpa_ind = hierarchy[parent_ind][3]\n inner_point = _get_contour_inner_point(contour)\n if not img[inner_point[1], inner_point[0]]:\n # is filled shape\n # allow grandpa contour to be added\n if (grandpa_ind != -1 and\n grandpa_ind not in allowed_nodes):\n allowed_nodes.append(grandpa_ind)\n if ind not in shapes_contours_inds:\n shapes_contours_inds[ind] = []\n try:\n shapes_contours_inds[grandpa_ind].append(\n parent_ind)\n except KeyError:\n shapes_contours_inds[grandpa_ind] = [\n parent_ind]\n assessed_node[parent_ind] = True\n\n else:\n # is not filled shape\n # allow parent contour to be added\n assessed_node[parent_ind] = True\n if shapes_contours_inds:\n shapes_contours_inds[-1].append(ind)\n try:\n shapes_contours_inds[parent_ind].append(ind)\n except KeyError:\n shapes_contours_inds[parent_ind] = [ind]\n if parent_ind not in allowed_nodes:\n allowed_nodes.append(parent_ind)\n\n else:\n if ind in allowed_nodes:\n assessed_node[ind] = True\n allowed_nodes.remove(ind)\n parent_ind = h_level[3]\n if ind not in shapes_contours_inds:\n shapes_contours_inds[ind] = {}\n if parent_ind != -1:\n grandpa_ind = hierarchy[parent_ind][3]\n if (grandpa_ind != -1 and\n grandpa_ind not in allowed_nodes):\n try:\n shapes_contours_inds[grandpa_ind].append(\n parent_ind)\n except KeyError:\n shapes_contours_inds[grandpa_ind] = [\n parent_ind]\n assessed_node[parent_ind] = True\n allowed_nodes.append(grandpa_ind)\n elif grandpa_ind == -1:\n raise BaseException(\"Incorrect Handling\"\n \" of image contours\")\n level += 1\n shapes = []\n for outer_ind in shapes_contours_inds:\n shapes.append((contours[outer_ind],\n 
[contours[ind] for ind in\n                           list(set(shapes_contours_inds[outer_ind]))]))\n    return shapes\n\n\ndef get_labeled_ccs_patch(img, ccs):\n    \"\"\"Given an input image and a list of connected components, return a corresponding labels image\n    with the pixels of each cc_ set to the index of the cc_ in the given list\n    (background pixels are -1), useful for placing the cc_ patches labels inside\n    the input image coordinates\n\n    :param img: the input image\n    :type img: np.ndarray\n    :param ccs: the list of connected components\n    :type ccs: list(ConnectedComponent)\n    :return: the resulting labels image\n    :rtype: np.ndarray\n    \"\"\"\n    patch = GrayscaleGraphic(np.zeros_like(img) - 1)\n    if not ccs:\n        return patch\n    for cnt, cc_ in enumerate(ccs):\n        partial = patch[cc_.indices]\n        partial[cc_.patch > 0] = cnt\n    return patch\n\n\ndef join_ccs(ccs):\n    \"\"\"Join a list of ccs to a single \"connected component\", although this is not really a\n    connected component, but only a ConnectedComponent object of their union\n\n    :param ccs: the list of connected components\n    :type ccs: list(ConnectedComponent)\n    :return: the joined \"connected component\", or None if empty list is given\n    :rtype: ConnectedComponent|None\n    \"\"\"\n\n    if not ccs:\n        return None\n    from copy import deepcopy as copy\n    out = copy(ccs[0])\n    out._bbox = None\n    for cc_ in ccs[1:]:\n        out.area = cc_.area + out.area\n        out.x_min = min(cc_.x_min, out.x_min)\n        out.x_max = max(cc_.x_max, out.x_max)\n        out.y_min = min(cc_.y_min, out.y_min)\n        out.y_max = max(cc_.y_max, out.y_max)\n    out.indices = (np.s_[out.y_min: out.y_max + 1],\n                   np.s_[out.x_min: out.x_max + 1])\n    out.height = out.y_max - out.y_min + 1\n    out.width = out.x_max - out.x_min + 1\n    out.patch = GrayscaleGraphic(np.zeros((out.height, out.width), int))\n    for cc_ in ccs:\n        out.patch[cc_.bbox - out.bbox.top_left_point] += cc_.patch\n    return out\n\n\ndef get_cc_bbox_patch(img, cc_):\n    \"\"\"Given an image and a connected component, return the corresponding\n    mask of its bounding box\n\n    :param img: the image\n    :type img: np.ndarray\n    :param cc_: the connected component\n    :type cc_: ConnectedComponent\n    :return: the mask of its bounding box\n    :rtype: GrayscaleGraphic\n    \"\"\"\n    img = GrayscaleGraphic(np.zeros_like(img))\n    rec = cc_.bbox\n    img[rec] = 1\n    return img\n\n\ndef get_superposed_patches(patches):\n    \"\"\"Given a list of binary patches, create a new image that is the union of all of them\n\n    :param patches: the list of patches\n    :type patches: list(np.ndarray)\n    :return: the resulting patches union\n    :rtype: np.ndarray\n    \"\"\"\n\n    img = patches[0].copy()\n    for patch in patches[1:]:\n        img += patch\n    img = img > 0\n    return img.astype(np.uint8)\n","sub_path":"Metis/file-parser/ImageProcessing/CCAnalysis/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":11919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
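+The module's get_ccs wraps in-house classes, but the underlying OpenCV call can be exercised standalone; a minimal sketch of cv2.connectedComponentsWithStats on a tiny binary image (this is not the module's API):
+
+    import cv2
+    import numpy as np
+
+    img = np.zeros((8, 8), np.uint8)
+    img[1:3, 1:3] = 1  # first blob
+    img[5:7, 4:8] = 1  # second blob
+
+    n, labels, stats, centroids = cv2.connectedComponentsWithStats(img)
+    # row 0 of stats is the background; columns are x, y, width, height, area
+    for i in range(1, n):
+        x, y, w, h, area = stats[i]
+        print(i, (x, y, w, h), area)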
+{"seq_id":"559866378","text":"from flask import Blueprint, request\n\nfrom orm import Pdb\nfrom .base import json_success, before_request, current_user\n\nbooks_bp = Blueprint('books', __name__)\nbefore_request = books_bp.before_request(before_request)\n\n\n@books_bp.route('/', methods=['GET', 'POST'])\ndef index():\n user = current_user\n print(user)\n books = Pdb.book.get_all_books()\n return json_success(books)\n\n\n@books_bp.route('/search', methods=['GET', 'POST'])\ndef search():\n type_id = request.args.get('type_id')\n keyword = request.args.get('keyword')\n\n books = Pdb.book.search_books(type_id, keyword)\n import pdb\n pdb.set_trace()\n return json_success(books)\n","sub_path":"controller/books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"589556072","text":"import os\nimport numpy as np\nimport math\nimport openravepy\nimport argparse\nimport sys\nimport json\nimport networkx as nx\n\nTHRESHOLD = 0.1\ndef calc_weight(config1, config2):\n return math.sqrt(float(np.sum((config2-config1)**2)))\n\ndef state_to_numpy(state):\n strlist = state.split()\n val_list = [float(s) for s in strlist]\n return np.array(val_list) \n\ndef save_modified_graph(G):\n file_addr = \"graphs/shallow_graph.graphml\"\n to_remove = []\n for i, edge in enumerate(G.edges()):\n u, v = edge\n G[u][v]['weight'] = calc_weight(state_to_numpy(G.node[u]['state']), state_to_numpy(G.node[v]['state']))\n if(G[u][v]['weight']>THRESHOLD):\n to_remove.append([u, v])\n for edge in to_remove:\n u, v = edge\n G.remove_edge(u, v) \n nx.write_graphml(G, file_addr) \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate environments')\n parser.add_argument('--graphfile',type=str,required=True)\n args = parser.parse_args()\n \n G = nx.read_graphml(args.graphfile)\n\n save_modified_graph(G)\n","sub_path":"save_weighted_graph.py","file_name":"save_weighted_graph.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"30411480","text":"import json\nimport logging\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import filesizeformat\nfrom django.utils.text import normalize_newlines\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.debug import sensitive_variables\n\nfrom horizon import forms\nfrom horizon.utils import functions\nfrom horizon import workflows\nfrom horizon import exceptions\n\nfrom openstack_dashboard import api\n\nfrom openstack_dashboard.dashboards.project.images \\\n import utils as image_utils\nfrom openstack_dashboard.dashboards.project.instances \\\n import utils as instance_utils\n\nLOG = logging.getLogger(__name__)\n\nclass SetInstanceDetailsAction(workflows.Action):\n #availability_zone = forms.ChoiceField(label=_(\"Availability Zone\"),\n # required=False)\n\n name = forms.CharField(label=_(\"Instance Name\"),\n max_length=255)\n\n #flavor = forms.ChoiceField(label=_(\"Flavor\"),\n # help_text=_(\"Size of image to launch.\"))\n\n count = forms.IntegerField(label=_(\"Instance Count\"),\n min_value=1,\n initial=1,\n #widget=forms.TextInput(attrs={'readonly': 'readonly'}),\n help_text=_(\"Number of instances to launch.\"))\n\n #address = forms.IPField(label=_(\"Network Address\"),\n # required=False,\n # help_text=_(\"Specify the IP Address of instance. \"\n # \"If you use the default, leave blank.\"),\n # version=forms.IPv4,\n # mask=False)\n cidr = forms.ChoiceField(label=_(\"Select subnet\"))\n\n #gateway = forms.ChoiceField(label=(\"Select Default Access Point for SSH\"))\n\n image_id = forms.ChoiceField(\n label=_(\"Image Name\"),\n #required=False,\n widget=forms.SelectWidget(\n data_attrs=('volume_size',),\n transform=lambda x: (\"%s (%s)\" % (x.name,\n filesizeformat(x.bytes)))))\n class Meta:\n name = _(\"Details\")\n #help_text_template = (\"project/instances/\"\n # \"_launch_details_help.html\")\n\n def __init__(self, request, context, *args, **kwargs):\n self._init_images_cache()\n super(SetInstanceDetailsAction, self).__init__(\n request, context, *args, **kwargs)\n\n try:\n netypes = api.proxy.network_type_list()\n except:\n netypes = []\n exceptions.handle(request, _('Unable to retrieve network type.'),\n ignore=True)\n\n self.fields['cidr'].choices = ((nt.id, nt.cidr) for nt in netypes)\n\n def clean(self):\n cleaned_data = super(SetInstanceDetailsAction, self).clean()\n if not cleaned_data.get('image_id'):\n msg = _(\"You must select a image.\")\n raise forms.ValidationError(msg)\n\n count = cleaned_data.get('count', 1)\n #limit = api.proxy.project_absolute_limits(\n # self.request, self.initial['zone_id'])\n #if limit.maxTotalInstances - limit.totalInstancesUsed - count < 0:\n # msg = _(\"Quota exceeded.\")\n # raise forms.ValidationError(msg)\n\n limits = api.proxy.user_absolute_limits(self.request)\n\n if limits.maxTotalInstances - limits.totalInstancesUsed - count < 0:\n msg = _(\"Quota exceeded.\")\n raise forms.ValidationError(msg)\n\n address = cleaned_data.get('address', None)\n if address is not None:\n try:\n api.proxy.server_network(self.request, address)\n except:\n msg = _(\"This address was in used.\")\n raise forms.ValidationError(msg)\n \n return cleaned_data\n\n #def populate_flavor_choices(self, request, context):\n # flavors = instance_utils.flavor_list(request, context['zone_id'])\n # if flavors:\n # return instance_utils.sort_flavor_list(request, flavors)\n # return []\n\n #def populate_availability_zone_choices(self, request, context):\n # try:\n # zones = 
api.nova.availability_zone_list(self._request)\n # except Exception:\n # zones = []\n # exceptions.handle(request,\n # _('Unable to retrieve availability zones.'))\n\n # zone_list = [(zone.zoneName, zone.zoneName)\n # for zone in zones if zone.zoneState['available']]\n # zone_list.sort()\n # if not zone_list:\n # zone_list.insert(0, (\"\", _(\"No availability zones found\")))\n # elif len(zone_list) > 1:\n # zone_list.insert(0, (\"\", _(\"Any Availability Zone\")))\n # return zone_list\n\n #def get_help_text(self):\n # extra = {}\n # try:\n # flavors = json.dumps([f._info for f in\n # instance_utils.flavor_list(self.request,\n # self.initial['zone_id'])])\n # extra['flavors'] = flavors\n\n # except Exception:\n # exceptions.handle(self.request,\n # _(\"Unable to retrieve quota information.\"))\n # return super(SetInstanceDetailsAction, self).get_help_text(extra)\n\n def _init_images_cache(self):\n if not hasattr(self, '_images_cache'):\n self._images_cache = {}\n\n def populate_image_id_choices(self, request, context):\n choices = []\n image_dict = {}\n #images = image_utils.get_available_images(request,\n # context.get('zone_id'),\n # images_cache=self._images_cache)\n try:\n zones = api.proxy.availability_zone_list(request)\n except:\n zones = []\n exceptions.handle(request, _('Unable to retrieve zones.'), ignore=True)\n\n for zone in zones:\n images = image_utils.get_available_images(\n request, zone.id, images_cache=self._images_cache)\n for image in images:\n image_dict[image.name] = image\n\n for image in image_dict.values():\n image.bytes = image.size\n image.volume_size = max(\n image.min_disk, functions.bytes_to_gigabytes(image.bytes))\n choices.append((image.id, image))\n if choices:\n choices.sort(key=lambda c: c[1].name)\n choices.insert(0, (\"\", _(\"Select Image\")))\n else:\n choices.insert(0, (\"\", _(\"No images available\")))\n return choices\n\n\nclass SetInstanceDetails(workflows.Step):\n action_class = SetInstanceDetailsAction\n #depends_on = (\"zone_id\",)\n #contributes = (\"name\", \"count\", \"address\", \"flavor\", \"image_id\")\n contributes = (\"name\", \"count\", \"cidr\", \"flavor\", \"image_id\")\n\nclass LaunchInstance(workflows.Workflow):\n slug = \"launch_instance\"\n name = _(\"Launch Instance\")\n finalize_button_name = _(\"Launch\")\n success_message = _('Launched %(count)s named \"%(name)s\".')\n failure_message = _('Unable to launch %(count)s named \"%(name)s\".')\n success_url = \"horizon:project:instances:index\"\n default_steps = (SetInstanceDetails,)\n\n def format_status_message(self, message):\n name = self.context.get('name', 'unknown instance')\n count = self.context.get('count', 1)\n if int(count) > 1:\n return message % {\"count\": _(\"%s instances\") % count,\n \"name\": name}\n else:\n return message % {\"count\": _(\"instance\"), \"name\": name}\n\n @sensitive_variables('context')\n def handle(self, request, context):\n custom_script = context.get('customization_script', '')\n\n image_id = context['image_id']\n\n if not context.get('zone_id', None):\n context['zone_id'] = None\n\n if not context.get('count', None):\n context['count'] = 1\n\n if not context.get('address', None):\n context['address'] = None\n\n if not context.get('flavor', None):\n context['flavor'] = 1\n\n if not context.get('cidr', None):\n context['cidr'] = 1\n\n request.session['netype'] = int(context['cidr'])\n\n netids = context.get('network_id', None)\n if netids:\n nics = [{\"net-id\": netid, \"v4-fixed-ip\": \"\"}\n for netid in netids]\n else:\n nics = None\n\n 
avail_zone = context.get('availability_zone', None)\n\n if not context.get('keypair_id', None):\n context['keypair_id'] = None\n\n if not context.get('security_group_ids', None):\n context['security_group_ids'] = None\n\n if not context.get('admin_pass', None):\n context['admin_pass'] = None\n\n if not context.get('disk_config', None):\n context['disk_config'] = None\n\n api.proxy.server_create(request,\n context['name'],\n image_id,\n context['flavor'],\n zone_id=context['zone_id'],\n key_name=context['keypair_id'],\n user_data=normalize_newlines(custom_script),\n security_groups=None,\n block_device_mapping=None,\n block_device_mapping_v2=None,\n nics=nics,\n availability_zone=avail_zone,\n instance_count=int(context['count']),\n admin_pass=context['admin_pass'],\n disk_config=context['disk_config'],\n accessIPv4=context['address'],\n net_type=int(context['cidr']))\n return True\n","sub_path":"tools/dockerize/webportal/usr/share/openstack-dashboard/openstack_dashboard/dashboards/project/instances/workflows/create_instance.py","file_name":"create_instance.py","file_ext":"py","file_size_in_byte":10020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"463506234","text":"# Find the largest palindrome made from the product of two 3-digit numbers\n\n\ndef isPalindrome(str):\n if len(str) == 1 or len(str) == 0:\n return True\n if str[0] == str[-1]:\n return isPalindrome(str[1:-1])\n return False\n\n\ndef largestPalindrome():\n largest = -1\n \n for x in range(999, 0, -1):\n for y in range(999, 0, -1):\n product = x * y\n if isPalindrome(str(product)) and product > largest:\n largest = product\n\n return largest\n\n\nprint(largestPalindrome()) # 906609\n","sub_path":"python/problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"597064790","text":"import os\nimport json\nfrom datetime import datetime, timedelta\n\nGOOGLE_APIS = (\"drive\", \"admin\", \"groupssettings\")\nFILTERS = {\n # content/get, content/list, ...\n \"content/*\": [\n \"lastRevision\",\n \"authorDetails\",\n \"updatedByDetails\",\n \"writerDetails\",\n \"headerDetails\",\n \"customContentTypeDetails\",\n ],\n # community/get, community/list, ...\n \"community/*\": [\n \"lastRevision\",\n \"authorDetails\",\n \"updatedByDetails\",\n \"writerDetails\",\n \"headerDetails\",\n \"customContentTypeDetails\",\n \"adminsDetails\",\n \"usersDetails\",\n ],\n \"communitytemplate/*\": [\n \"lastRevision\",\n \"authorDetails\",\n \"updatedByDetails\",\n \"writerDetails\",\n \"headerDetails\",\n \"customContentTypeDetails\",\n \"adminsDetails\",\n \"usersDetails\",\n ],\n # template/get, template/list, ...\n \"template/*\": [\"properties/duplicateContent\"],\n \"community/post/*\": [\n \"authorDetails\",\n \"updatedByDetails\",\n \"mentionsDetails\",\n \"parentContentDetails\",\n ],\n \"comment/get\": [\"authorProperties\", \"mentionsDetails\"],\n \"comment/list\": [\"authorProperties\", \"mentionsDetails\"],\n}\n\n\ndef pop_matches(dpath, d):\n if not dpath:\n return\n for pth_part in dpath.split(\"/\")[:-1]:\n if not isinstance(d, dict):\n return\n d = d.get(pth_part)\n if not isinstance(d, dict):\n return\n d.pop(dpath.rpartition(\"/\")[2], None)\n\n\ndef get_conf_file():\n if \"APPDATA\" in os.environ:\n d = os.environ[\"APPDATA\"]\n elif \"XDG_CONFIG_HOME\" in os.environ:\n d = os.environ[\"XDG_CONFIG_HOME\"]\n else:\n d = os.path.join(os.path.expanduser(\"~\"), \".config\")\n if __package__:\n return os.path.join(d, __package__ + \".conf\")\n else:\n return os.path.join(d, \"lumapps_api_client.conf\")\n\n\ndef get_conf():\n try:\n with open(get_conf_file()) as fh:\n conf = json.load(fh)\n except IOError:\n return {\"configs\": {}, \"cache\": {}}\n if not conf:\n conf = {\"configs\": {}, \"cache\": {}}\n return conf\n\n\ndef set_conf(conf):\n try:\n with open(get_conf_file(), \"wt\") as fh:\n return json.dump(conf, fh, indent=4)\n except IOError:\n pass\n\n\nclass ApiCallError(Exception):\n pass\n\n\nclass DiscoveryCache(object):\n _max_age = 60 * 60 * 24 # 1 day\n\n @staticmethod\n def get(url):\n cached = get_conf()[\"cache\"].get(url)\n if not cached:\n return None\n expiry_dt = datetime.strptime(cached[\"expiry\"][:19], \"%Y-%m-%dT%H:%M:%S\")\n if expiry_dt < datetime.now():\n return None\n return cached[\"content\"]\n\n @staticmethod\n def set(url, content):\n conf = get_conf()\n conf[\"cache\"][url] = {\n \"expiry\": (\n datetime.now() + timedelta(seconds=DiscoveryCache._max_age)\n ).isoformat()[:19],\n \"content\": content,\n }\n set_conf(conf)\n","sub_path":"lumapps/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"145032140","text":"import wizard\nimport pooler\nfrom tools.misc import UpdateableStr,UpdateableDict\nimport time\nimport operator\nfrom osv import osv, fields\nfrom osv.orm import intersect\nimport tools.sql\nfrom tools import config\nfrom tools.translate import _\nfrom mx import DateTime\n\nform_error = '''\n'''\n\n\ndef _check(self, cr, uid, data, context):\n try:\n prj_id = data['ids']\n except:\n prj_id = data['id']\n if len(prj_id)>1:\n return 'print_error'\n \n pool = pooler.get_pool(cr.dbname)\n prj_obj = pool.get('kdvn.project')\n prj = prj_obj.browse(cr,uid,prj_id,context=context)\n \n if prj[0].project_type=='E':\n return 'print_job_e'\n else:\n return 'print_job_m'\n\nclass wizard_job_and_quotation_list(wizard.interface):\n \"\"\"\n Job and Quotation List Report\n \"\"\"\n states = {\n \n 'init': {\n 'actions': [],\n 'result': {'type': 'choice','next_state':_check\n } \n },\n 'print_error': {\n 'actions': [],\n 'result': {\n 'type': 'form',\n 'arch':form_error,\n 'fields':{},\n 'state':[('end','Ok','gtk-ok')]\n }},\n 'print_job_e': {\n 'actions': [],\n 'result': {'type':'print', 'report':'kdvn.report.job.quotation.list.electrical.hidden','state':'end'\n }\n },\n 'print_job_m': {\n 'actions': [],\n 'result': {'type':'print', 'report':'kdvn.report.job.quotation.list.mechanical.hidden','state':'end'\n }\n },\n }\nwizard_job_and_quotation_list('job_and_quotation_list')\n\nclass wizard_job_and_quotation_list_excel(wizard.interface):\n \"\"\"\n Job and Quotation List Excel Report\n \"\"\"\n states = {\n \n 'init': {\n 'actions': [],\n 'result': {'type': 'choice','next_state':_check\n } \n },\n 'print_error': {\n 'actions': [],\n 'result': {\n 'type': 'form',\n 'arch':form_error,\n 'fields':{},\n 'state':[('end','Ok','gtk-ok')]\n }},\n 'print_job_e': {\n 'actions': [],\n 'result': {'type':'print', 'report':'kdvn.report.job.quotation.list.electrical.excel.hidden','state':'end'\n }\n },\n 'print_job_m': {\n 'actions': [],\n 'result': {'type':'print', 'report':'kdvn.report.job.quotation.list.mechanical.excel.hidden','state':'end'\n }\n },\n }\nwizard_job_and_quotation_list_excel('job_and_quotation_list_excel')\n\n\ninputform=''''''\n\n#Fields=UpdateableDict()\n\nFields={'date':{'string': 'Date', 'type': 'date','default': lambda *a:time.strftime('%Y-%m-%d')}}\n\n\nclass wizard_job_and_quotation_date(wizard.interface):\n \"\"\"\n Job and Quotation Date\n \"\"\"\n states = {\n 'init': {\n 'actions': [],\n 'result': {'type': 'form','arch':inputform,'fields':Fields,'state': [\n ('update_report', '_Print', 'gtk-ok', True),\n ('end', '_Cancel','gtk-cancel'),\n ]\n }}, \n 'update_report': {\n 'actions': [],\n 'result': {'type':'print', 'report':'job.and.quotation.list.date.hidden','state':'end'}}\n }\nwizard_job_and_quotation_date('job_and_quotation_date')\n","sub_path":"src/module_reports/wizard/wizard_job_and_quotation_list.py","file_name":"wizard_job_and_quotation_list.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"265092019","text":"import os\nimport sys\nimport numpy as np\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nimport wanderer\n\n\nclass Scenario(wanderer.Scenario):\n def set_params(self):\n self.n_agents = 1\n self.PENALTY_WEIGHT = 0.0 # energy\n self.PENALTY_WEIGHT2 = 0.0 # attention\n self.VISIBLE_WEIGHT = 0.1\n self.L_PROB = 1.0\n\n def get_world(self):\n world = wanderer.World3()\n world.dim_c = 3\n return world\n\n def reward(self, agent, world):\n dist2 = self.dist2(agent.state.p_pos, world.landmarks[0].state.p_pos)\n rew = -dist2\n rew += self.PENALTY_WEIGHT * agent.state.energy\n rew += self.PENALTY_WEIGHT2 * agent.state.attention\n return rew\n\n def observation(self, agent, world):\n agent.state.visible_radius = self.calc_visible_radius(agent)\n\n entity_pos = []\n for entity in world.landmarks:\n dv = agent.state.p_pos - entity.state.p_pos\n proc_dv = self.mask_vector(dv, agent.state.visible_radius, self.L_PROB)\n entity_pos.append(proc_dv)\n\n # multi-agent case\n other_pos = []\n for other in world.agents:\n if other is agent:\n continue\n # only in the B case, agnet can get others' position\n if np.argmax(agent.state.c) == 1:\n vec_to_other = other.state.p_pos - agent.state.p_pos\n else:\n vec_to_other = np.zeros_like(other.state.p_pos)\n other_pos.append(vec_to_other)\n return np.concatenate([agent.state.p_vel] + entity_pos + other_pos)\n","sub_path":"multiagent-particle-envs/multiagent/scenarios/wanderer2.py","file_name":"wanderer2.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"200472515","text":"# 创建字典\n#dictA={\"键\":'值','key':'value'}\ndictA={\"pro\":'艺术','shcool':'北京电影学院'}\n\n# 添加字典数据\n#dictA['键']=值\ndictA['name']='李易峰' #key:value\ndictA['age']='30'\ndictA['pos']='歌手'\n\n# print(dictA) #输出完整的字典\n#-------------键值对个数\n# print(len(dictA))\n# print(type(dictA))\n\n# print(dictA['name']) #通过键获取对应的值\n# dictA['name']='谢霆锋' #修改键对应的值\ndictA['shcool']='香港大学'\n\n# ------------可以添加或者更新\n# dictA.update({'height':1.80})\n# print(dictA)\n\n# ------------获取所有的键\n# print(dictA.keys())\n# ------------获取所有的值\n# print(dictA.values())\n# ------------获取所有的键和值\nprint(dictA.items())\nfor key,value in dictA.items():\n print('%s==%s'%(key,value))\n\n# --------------------删除操作\n# del dictA['name'] #--------通过指定键进行删除\n# dictA.pop('age') #--------通过指定键进行删除\n#print(dictA)\n\n# --------------按照key排序\n# print(sorted(dictA.items(),key=lambda d:d[0]))\n# 按照value排序\n# print(sorted(dictA.items(),key=lambda d:d[1]))\n\n# ---------------字典拷贝 字典之间不能直接赋值 dicB = dicA\n# import copy\n# # dictB=copy.copy(dictA) #浅拷贝\n# dictC=copy.deepcopy(dictA) #深拷贝\n# print(id(dictC))\n# print(id(dictA))\n# # dictB['name']='peter'\n# dictC['name']='刘德华'\n# print(dictC)\n# print(dictA)\n\n# 拷贝有三种方式:\n# 直接赋值:其实就是对象的引用(别名)。\n# 浅拷贝(copy):拷贝父对象,不会拷贝对象的内部的子对象。在源对象上操作新对象可能会受影响。\n# 深拷贝(deepcopy): copy 模块的 deepcopy 方法,完全拷贝了父对象及其子对象。在源对象上操作新对象不受影响\n\n","sub_path":"day-python/day程序/day3/字典.py","file_name":"字典.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"636071037","text":"from utils import PubSub, topics\nimport asyncio\n\n\nclass Base:\n def __init__(self, hub):\n self.hub = hub\n self._ivar_map = {}\n self._authoritative = set()\n\n def __getattr__(self, item):\n pass\n\n def define_ivar(self, key, topic, default=None, authoritative=False):\n setattr(self, key, default)\n self._ivar_map[key] = topic\n if authoritative:\n self._authoritative.add(key)\n\n async def update_ivar(self, topic, key):\n with PubSub.Subscription(self.hub, topic) as queue:\n while True:\n setattr(self, key, await queue.get())\n\n async def update_ivars(self):\n updaters = []\n\n for key in self._ivar_map:\n updaters.append(self.update_ivar(self._ivar_map[key], key))\n\n await asyncio.gather(*updaters)\n\n def publish_authoritative(self):\n for key in self._authoritative:\n self.hub.publish(self._ivar_map[key], getattr(self, key))\n\n def pre_futures(self):\n return []\n\n def futures(self, loop):\n return [self.update_ivars()]","sub_path":"coroutines/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"143260527","text":"import scipy.optimize as scipy\r\nimport scipy as sc\r\nimport numpy as np\r\n\r\nA = np.array([[2,1,1,0,0,0,0],[1,2,1,3,2,1,0],[0,0,2,1,2,4,6]])\r\nb = np.array([12000, 24000, 27000])\r\nc = np.array([0,3,1,1,4,2,0])\r\n\r\nA_dual = np.transpose(A)\r\nb_dual = c*(-1)\r\nc_dual = b\r\n\r\n\r\nres = scipy.linprog(c_dual, A_dual*(-1), b_dual*(-1)).x\r\nprint(\"rozwiązanie dualne:\")\r\nprint(res)\r\n\r\nnierownosc_ostra = []\r\nfor i in range(len(A_dual)):\r\n\tif round(res[0]*A_dual[i][0]+res[1]*A_dual[i][1],2) == b_dual[i]:\r\n\t\tnierownosc_ostra.append(False)\r\n\telse:\r\n\t\tnierownosc_ostra.append(True)\r\n\r\n\r\nA2 = [[2,1,1,0,0,0,0],[1,2,1,3,2,1,0],[0,0,2,1,2,4,6]]\r\nA3 = []\r\nfor i in range(len(nierownosc_ostra)):\r\n\tif not nierownosc_ostra[i]:\r\n\t\tA3.append(A_dual[i])\r\n\r\nA4 = np.array(A3)\r\nA4 = np.transpose(A4)\r\n\r\nQ, R = np.linalg.qr(A4)\r\nx = sc.linalg.solve_triangular(R, Q.T.dot(b), lower=False)\r\n\r\nj=0\r\nfor i in range(len(A[0])):\r\n\tprint (\"x\"+str(i+1)+\" = \", end=\"\")\r\n\tif (nierownosc_ostra[i]):\r\n\t\tprint(\"0\")\r\n\telse:\r\n\t\tprint(x[j])\r\n\t\tj+=1","sub_path":"lab08_minimax-master/lab08_minimax-master/lab8_4.py","file_name":"lab8_4.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"422046348","text":"#!/usr/bin/env python\nimport RPi.GPIO as GPIO\nimport os\nimport pygame\nimport pygame.freetype\n\nimport sys\nimport subprocess\nimport time\nimport _thread\n\nsys.path.append(\"home/pi/go/bin/\")\nphoto_folder = \"/home/pi/fotobox_bilder\"\nqr_path = \"/home/pi/fotobox/qr_small.png\"\nqr_image = None\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP) # input\nGPIO.setup(18, GPIO.OUT) # LED\nGPIO.setup(17, GPIO.OUT) # green led\n\nWIDTH = 1680\nHEIGHT = 1050\n\n\ndef io_remote():\n # switch on ready\n GPIO.output(17, 1)\n # wait for button press\n GPIO.wait_for_edge(22, GPIO.RISING, bouncetime=100)\n # switch of green light\n GPIO.output(17, 0)\n\n\ndef get_file_name():\n # name mit Timestamp versehen\n ts = time.gmtime()\n readable_ts = time.strftime(\"%H_%M_%S\", ts)\n photo_name = \"/bhwp_2020_\" + readable_ts + \".jpg\"\n return photo_folder + photo_name\n\n\ndef load_image(img_path):\n return pygame.image.load(img_path).convert()\n\n\ndef signal_hook(running=True):\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n runnig = False\n pygame.quit()\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_f:\n pygame.display.toggle_fullscreen()\n\n\ndef display_image(screen, img):\n try:\n img = pygame.transform.scale(img, (WIDTH, HEIGHT))\n screen.blit(img, (0, 0))\n except:\n pass\n pygame.display.flip()\n pygame.display.update()\n\n\ndef display_qr(screen, qr):\n screen.blit(qr, (1330, 700))\n pygame.display.flip()\n pygame.display.update()\n\n\ndef display_text(screen, text, color=(20, 240, 100), size=70):\n font = pygame.freetype.Font(\n \"/usr/share/fonts/truetype/liberation2/LiberationSans-Bold.ttf\", size\n )\n text_renderd, text_rect = font.render(text, color)\n screen_rect = screen.get_rect()\n text_rect.centerx = screen_rect.centerx\n screen.blit(text_renderd, (50, 50))\n pygame.display.update()\n\n\ndef sync_photo(photo_path):\n # hochladen des bildes\n print(\"Uploading file..\")\n print(os.path.isfile(photo_path))\n subprocess.run(\n [\"rclone\", \"copy\", \"{}\".format(photo_path), \"fotobox_remote:fotobox\"]\n )\n print(\"photo uploaded!\")\n\n\npygame.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT), 0, 0)\n_thread.start_new_thread(signal_hook, ())\nif not qr_image:\n qr_img = load_image(qr_path)\n\nwhile 1:\n print(\"Starting...\")\n GPIO.output(18, 0)\n while 1:\n display_qr(screen, qr_img)\n io_remote()\n try:\n display_image(screen, img)\n except NameError:\n pass\n display_text(screen, \"Geschafft! 
Erstmal chillen...\", color=(10, 50, 255))\n # shoot picture\n photo_path = get_file_name()\n print(photo_path)\n # Capture photo\n try:\n subprocess.run(\n [\n \"gphoto2\",\n \"--capture-image-and-download\",\n \"--camera='Canon EOS 350D (normal mode)'\",\n \"--filename={}\".format(photo_path),\n \"--force-overwrite\",\n ]\n )\n except:\n continue\n try:\n img = load_image(photo_path)\n display_image(screen, img)\n display_text(\n screen, \"Zuerst das Vergnügen, dann der Upload in die cloud...\", size=50\n )\n except:\n pass\n\n try:\n if os.path.isfile(photo_path):\n sync_photo(photo_path)\n except:\n print(\"not uplloaded\")\n\n display_image(screen, img)\n display_text(screen, \"Kamera is breit!\", color=(20, 230, 20))\n print(\"Return to main process\")\n\n# gdrive about --service-account fotobox-265418-e52db5a765c2.json\n# gdrive --service-account fotobox-265418-e52db5a765c2.json share ~/fotobox_bilder\n\n# gdrive --service-account fotobox-265418-e52db5a765c2.json sync list\n","sub_path":"fotobox.py","file_name":"fotobox.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"372750806","text":"import datetime, hashlib, time, json\nfrom dateutil.parser import parse\nimport re\n\ndef stringToDate(datestring):\n arr = datestring.split(\"/\")\n finaldate = \"\"\n i = 0\n for i in range(0, len(arr)):\n if i < len(arr) - 1:\n arr[i] = \"0\" + str(arr[i])\n arr[i] = arr[i][-2:]\n\n finaldate += arr[i] + \"/\"\n return finaldate[:-1]\n\n\n\ndef computeMD5hash(string):\n m = hashlib.md5()\n m.update(string.encode('utf-8'))\n return m.hexdigest()\n\ndef yearFromStringDate(datestring):\n #datestring = stringToDate(datestring)\n #dt = time.strptime(datestring.strip(), '%m/%d/%Y')\n arr = datestring.split(\"/\")\n return arr[2]\n\n\ndef getJsonMapping(year, mappingName):\n jsonFileName = 'mappings/' + str(year) + '/' + mappingName + '.json'\n json_data=open(jsonFileName)\n data = json.load(json_data)\n json_data.close()\n\n return data\n\ndef convertDateToMySQL(strDate):\n print(strDate)\n try:\n return str(parse(strDate).strftime(\"%Y-%m-%d\"))\n except ValueError:\n return \"\"\n\n\ndef convertDateTimeToMySQL(strDateTime):\n try:\n return str(parse(strDateTime).strftime(\"%Y-%m-%d %H:%M:%S\"))\n except ValueError:\n return \"\"\n\n\ndef stripNonNumeric(strNum):\n return re.sub(\"[^0-9]\", \"\", strNum)\n\n\ndef prefixZeros(input, maxLen):\n prefix = \"\"\n input = str(input).strip();\n for i in range(0, maxLen - len(input)):\n prefix = prefix + \"0\"\n\n return prefix + input","sub_path":"oralpathology/oralpathimport/utilities/stringutils.py","file_name":"stringutils.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"38939633","text":"text = \"paraparaparadise\"\ntext2 = \"paragraph\"\nn = 2\n\nX = []\nY = []\n\nfor i in range(0, len(text), n-1):\n result1 = \"\"\n result1 += text[i:i+n]\n X.append(result1)\n\nfor i in range(0, len(text2), n-1):\n result2 = \"\"\n result2 += text2[i:i+n]\n Y.append(result2)\n\nif 'se' in X or 'se' in Y:\n print(\"ある\")\n\n# print(X)\n# print(Y)\n\nfor i in X:\n for j in Y:\n ii = set(i)\n jj = set(j)\n print(\"和集合\")\n print(ii | jj)\n print(\"積集合\")\n print(ii & jj)\n print(\"差集合\")\n print(ii - jj)","sub_path":"chapter1/q06.py","file_name":"q06.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"601725526","text":"#/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Socket,建立TCP连接实现Web功能,获取服务器响应,\n# 若响应正确(存在文件)则返回成html格式,响应错误(不存在文件)则返回404.html\nfrom socket import *\n\n#判断是否为html文件的函数,输入文件名,是.html文件返回true,不是则返回false\ndef isHtml(file_name):\n arr = file_name.split('.')\n postfix = arr[len(arr)-1]\n if postfix == \"html\":\n return True\n else:\n return False\n\n#读取文件,并反馈IOExpection,根据后缀名(是否为html),对读取文件进行不同程度修饰\ndef file_read(path, encode=\"utf-8\"):\n if path[:1] == '/':\n path = path[1:]\n\n result = \"\"\n with open(path, mode='r', encoding=encode, errors='ignore') as f:\n lines = f.readlines()\n for line in lines:\n result += line;\n\n if isHtml(path):\n result = \"HTTP/1.1 200 OK\\r\\n\\r\\n\" + result\n else:\n result = \"HTTP/1.1 200 OK\\r\\n\\r\\n\" \\\n + \"\" \\\n + \"\" + path + \"(格式化为html)\" + \"\" \\\n + \"\" + path + \"
\" \\\n + result + \"
\"\n return result\n\n#建立serverSocket并绑定到本机(127.0.0.1)的80端口,开启一个监听\n#(在主机已占用80端口的情况下,请换端口开启)\n#开启后由于http响应默认为80端口,则用户无需再输入端口\nserverPort = 80\nserverSocket = socket(AF_INET,SOCK_STREAM)\nserverSocket.bind(('127.0.0.1',serverPort))\nserverSocket.listen(1)\n\n#主循环监听,并提供服务\nwhile True:\n #测试启动正常,开始服务\n print('Ready to serve...')\n\n #由响应建立TCP连接\n client_connection, client_address = serverSocket.accept()\n\n #接受响应报文,解码成str\n request = client_connection.recv(1024).decode()\n\n #处理响应报文,拿到请求文件路径(含文件名)\n path = request.split('\\r')[0].split(' ')[1]\n\n #尝试在服务器上寻找,路径表示的文件,有则读取并返回该文件,无则返回404页面\n try:\n response = file_read(path)\n print(\"\\tfrom\",client_address,\"\\tGET:\",path,\", request succeed!\")\n except IOError:\n response = file_read(\"404.html\")\n print(\"\\tfrom\",client_address,\"\\tGET:\",path,\", request fail!\")\n\n #根据读取结果发送响应报文\n client_connection.sendto(response.encode(\"utf-8\"),client_address)\n\n #关闭TCP连接\n client_connection.close()\n\n","sub_path":"basic_learning/Test/test1/webServer.py","file_name":"webServer.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"80435871","text":"#!/usr/bin/env python\n\nfrom sys import argv\nfrom matplotlib import pyplot\nfrom matplotlib import lines as mlines\n\n\ndef make_figure(x_vals, y_precision, y_recall, y_f1, x_label, y_label=\"Key Figures\", save_as=False):\n \"\"\"Draw a graph based on the values specified\"\"\"\n p_line = mlines.Line2D([], [], label=\"Precision\", linestyle=\"--\", color=\"red\")\n r_line = mlines.Line2D([], [], label=\"Recall\", linestyle=\"-.\", color=\"green\")\n f_line = mlines.Line2D([], [], label=\"F1\", linestyle=None, color=\"blue\")\n\n pyplot.plot(x_vals, y_precision, \"r--\")\n pyplot.plot(x_vals, y_recall, \"g-.\")\n pyplot.plot(x_vals, y_f1, \"b\")\n pyplot.legend(handles=[p_line, r_line, f_line])\n pyplot.ylabel(y_label)\n pyplot.xlabel(x_label)\n\n if save and save_as is not None:\n pyplot.savefig(save_as)\n else:\n pyplot.show()\n\n pyplot.close()\n\n\nsave = False\nknn = False\nrf = False\nmlp = False\nread = False\nfile_name = \"figure.png\"\n\nif len(argv) > 1:\n for arg in argv:\n arg = arg.lower()\n if arg in \"save\" and arg[0] == \"s\":\n save = True\n if arg in \"read\" and arg[0] == \"r\":\n read = True\n if arg in \"knn\" and arg[0] == \"k\":\n knn = True\n rf = False\n mlp = False\n file_name = \"c4-knn.png\"\n if arg in \"rf\" and arg[0] == \"r\":\n knn = False\n rf = True\n mlp = False\n file_name = \"c4-forests.png\"\n if arg in \"mlp\" and arg[0] == \"m\":\n knn = False\n rf = False\n mlp = True\n file_name = \"c4-mlp.png\"\n\nif not knn and not rf and not mlp and not read:\n print(\"You have to specify either 'read', 'knn', 'rf' or 'mlp'!\")\n exit(1)\n\n\nif read:\n lines = []\n with open(\"c4-results.txt\") as result_file:\n lines = result_file.readlines()\n\n results = {\"knn\": [], \"forest\": [], \"neural\": [], \"bayes\": []}\n section = None\n\n for line in [l.strip() for l in lines if len(l.strip()) > 0]:\n if section is None:\n section = line\n elif line.startswith(\"end \"):\n section = None\n else:\n result = line.split()\n results[section].append(result)\n\n results.pop(\"bayes\")\n\n for classifier in results:\n if classifier == \"knn\":\n file_name = \"knn\"\n x_label = \"Number of Neighbours taken into account\"\n elif classifier == \"forest\":\n file_name = \"forests\"\n x_label = \"Number of Trees generated\"\n elif classifier == \"neural\":\n file_name = \"mlp\"\n x_label = \"Number of Hidden Layers (with 100 perceptrons each)\"\n else:\n x_label = \"Classifier Parameter\"\n\n x_values = []\n y_pre = []\n y_rec = []\n y_f1 = []\n\n for (x, p, r, f) in results[classifier]:\n x_values.append(int(x))\n y_pre.append(float(p))\n y_rec.append(float(r))\n y_f1.append(float(f))\n\n file_name = \"c4-{0}.png\".format(file_name)\n make_figure(x_values, y_pre, y_rec, y_f1, x_label, \"Key Figures\", file_name)\nelse:\n if knn:\n x_label = \"Number of Neighbours taken into account\"\n x = [5, 10, 25, 50, 100, 250]\n y_precision = [0.72, 0.75, 0.74, 0.7, 0.69, 0.64]\n y_recall = [0.74, 0.78, 0.76, 0.71, 0.7, 0.68]\n y_f1 = [0.73, 0.75, 0.71, 0.64, 0.62, 0.57]\n\n if rf:\n x_label = \"Number of Trees generated\"\n x = [10, 20, 50, 250, 500, 1000]\n y_precision = [0.77, 0.78, 0.79, 0.79, 0.8, 0.79]\n y_recall = [0.79, 0.81, 0.81, 0.82, 0.82, 0.81]\n y_f1 = [0.77, 0.78, 0.79, 0.79, 0.79, 0.78]\n\n if mlp:\n x_label = \"Number of Hidden Layers (with 100 perceptrons each)\"\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n y_precision = [0.78, 0.8, 0.8, 0.8, 0.79, 0.79, 0.79, 0.79, 0.79, 0.8]\n y_recall = [0.81, 0.82, 0.8, 0.8, 0.8, 0.79, 0.79, 0.8, 0.8, 
0.8]\n y_f1 = [0.79, 0.81, 0.8, 0.8, 0.79, 0.79, 0.79, 0.79, 0.79, 0.8]\n\n make_figure(x, y_precision, y_recall, y_f1, x_label, \"Key Figures\", \"figure.png\")","sub_path":"connect4plot.py","file_name":"connect4plot.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"489468971","text":"# note that i cannot import logging here or it will establish default parameters for all the modules\nimport os\nimport sys\nimport json\nimport pandas as pd\nimport numpy as np\nimport datetime\n\nimport src.utils.constants as ct\n\n\ndef create_folder(folder):\n\n import logging\n\n \"\"\"\n Check if a folder exists and create it if necessary.\n \"\"\"\n try:\n if not os.path.exists(folder):\n os.makedirs(folder)\n logging.info('Folder created: {0}'.format(folder))\n except OSError:\n if not os.path.exists(folder):\n logging.error('Error when trying to create folder {0}'.format(folder))\n sys.exit(-1)\n\n return\n\n\ndef tumor_type_taxonomy_checks(config):\n\n import logging\n\n \"\"\"\n Check whether a tumor type label exists in the Oncotree taxonomy and assign long name and level\n \"\"\"\n tumor_taxonomy_info_fp = config['data_paths']['cancer_taxonomy']['per_tumor_data']\n with open(tumor_taxonomy_info_fp) as json_data:\n tumor_taxonomy_info_d = json.load(json_data) # it is a list of dictionaries!\n tumor_taxonomy_info_df = pd.DataFrame.from_records(tumor_taxonomy_info_d)\n\n name, level = np.nan, np.nan\n if config['tumor_type'] not in tumor_taxonomy_info_df['code'].unique():\n logging.warning('WARNING: cancer acronym \"{0}\" is not among the oncotree codes'.format(config['tumor_type']))\n else:\n name = tumor_taxonomy_info_df[tumor_taxonomy_info_df['code']==config['tumor_type']]['name'].values[0]\n level = tumor_taxonomy_info_df[tumor_taxonomy_info_df['code']==config['tumor_type']]['level'].values[0]\n logging.debug('Oncotree code \"{0}\" recognized as {1}, level {2}'.format(config['tumor_type'], name, level))\n\n config['tumor_type_name'] = name\n config['tumor_type_tax_level'] = level\n\n return config\n\n\ndef get_tissue_offspring(tumor_tree_d, tissue):\n\n return list(tumor_tree_d['TISSUE']['children'][tissue]['children'].keys())\n\n\ndef get_tumor_parent(tumor_taxonomy_info_df, tumor, level_end):\n\n import logging\n\n tumor_level = tumor_taxonomy_info_df[tumor_taxonomy_info_df['code']==tumor]['level'].values[0]\n\n if tumor_level == level_end:\n logging.info('Tumor \"{0}\" is already level {1}'.format(tumor, level_end))\n return tumor\n\n if tumor_level < level_end:\n logging.warning('Tumor \"{0}\" is a higher level than {1}'.format(tumor, level_end))\n return tumor\n\n tmp_tumor = tumor\n tmp_level = tumor_level\n while tmp_level > level_end:\n parent = tumor_taxonomy_info_df[tumor_taxonomy_info_df['code'] == tmp_tumor]['parent'].values[0]\n tmp_level = tumor_taxonomy_info_df[tumor_taxonomy_info_df['code'] == parent]['level'].values[0]\n tmp_tumor = parent\n\n logging.info('Level {0} of \"{1}\" is \"{2}\" in the oncotree taxonomy'.format(level_end, tumor, parent))\n\n return parent\n\n\ndef get_broad_cancer_type(config):\n \"\"\"\n given a tumor type, retrieve its primary cancer (e.g. 
for matching with the gene mutations per cancer data)\n    \"\"\"\n    import logging\n\n    tumor_taxonomy_info_fp = config['data_paths']['cancer_taxonomy']['per_tumor_data']\n    with open(tumor_taxonomy_info_fp) as json_data:\n        tumor_taxonomy_info_d = json.load(json_data) # it is a list of dictionaries!\n    tumor_taxonomy_info_df = pd.DataFrame.from_records(tumor_taxonomy_info_d)\n\n    sample_tumor = config['tumor_type']\n    sample_tumor_level = tumor_taxonomy_info_df[tumor_taxonomy_info_df['code']==sample_tumor]['level'].values[0]\n\n    if sample_tumor_level == 2: # stated tumor type is already level 2\n        logging.info('Tumor \"{0}\" is already level 2 in the oncotree taxonomy'.format(sample_tumor))\n        return sample_tumor_level, sample_tumor\n\n    if sample_tumor_level>2: # stated tumor type is more specific than level 2\n        tmp_tumor = sample_tumor\n        tmp_level = sample_tumor_level\n        while tmp_level > 2:\n            parent = tumor_taxonomy_info_df[tumor_taxonomy_info_df['code'] == tmp_tumor]['parent'].values[0]\n            tmp_level = tumor_taxonomy_info_df[tumor_taxonomy_info_df['code'] == parent]['level'].values[0]\n            tmp_tumor = parent\n        logging.info('Tumor \"{0}\" corresponds to level 2 {1} in the oncotree taxonomy'.format(sample_tumor, parent))\n        return sample_tumor_level, parent\n\n    logging.info('Tumor \"{0}\" is level 1 in the oncotree taxonomy'.format(sample_tumor))\n\n    return sample_tumor_level, sample_tumor\n\n\ndef select_toi(config, df, gene_label='gene', transcript_label='transcript'):\n\n    import logging\n\n    toi_df = pd.read_csv(config['data_paths']['transcript_info']['selected_transcripts'], sep='\\t')\n    toi = toi_df[toi_df.columns[1]]\n\n    selected_df = df[df[transcript_label].isin(toi)]\n\n    original_genes, selected_genes = df[gene_label].unique(), selected_df[gene_label].unique()\n    lost_genes = list(set(original_genes)-set(selected_genes))\n    if len(lost_genes)>0:\n        logging.warning('These genes have no selected transcript: {0}'.format(lost_genes))\n\n    return selected_df\n\n\ndef get_data_conf(config):\n\n    import logging\n\n    \"\"\"\n    Check that the data config file is well formatted and that all the paths are actually file names that exist\n    \"\"\"\n    # function to retrieve all the values from a nested dict\n    def NestedDictValues(d):\n        for v in d.values():\n            if isinstance(v, dict):\n                yield from NestedDictValues(v)\n            else:\n                yield v\n\n    data_config_dict_fp = config['data_config_fp']\n\n    # the path exists\n    if not os.path.isfile(data_config_dict_fp):\n        logging.error('I cannot find the conf file {0}'.format(data_config_dict_fp))\n        sys.exit(-1)\n\n    # the config file is json format\n    with open(data_config_dict_fp) as json_data:\n        try:\n            data_config_d = json.load(json_data)\n        except:\n            logging.error('The data config file {0} cannot be read as a dictionary'.format(data_config_dict_fp))\n            sys.exit(-1)\n\n    # all the data paths are filenames that exist\n    fp_l = list(NestedDictValues(data_config_d))\n    for fp in fp_l:\n        if not os.path.isfile(fp):\n            logging.error('The path {0} stated in {1} is not a file'.format(fp, data_config_dict_fp))\n            sys.exit(-1)\n\n    # everything ok!\n    config['data_paths'] = data_config_d\n    logging.debug('Config file {0} ok'.format(data_config_dict_fp))\n\n    return config\n\n\ndef get_sw_conf(config):\n\n    import logging\n\n    sw_config_dict_fp = config['sw_config_fp']\n\n    if not os.path.isfile(sw_config_dict_fp):\n        logging.error('I cannot find the conf file {0}'.format(sw_config_dict_fp))\n        sys.exit(-1)\n\n    # the config file is json format\n    with open(sw_config_dict_fp) as json_data:\n        try:\n            sw_config_d = json.load(json_data)\n        except:\n            logging.error('The sw config file {0} cannot be read as a dictionary'.format(sw_config_dict_fp))\n            sys.exit(-1)\n\n    # everything ok!\n    config['sw_paths'] = sw_config_d\n    logging.debug('Config file {0} ok'.format(sw_config_dict_fp))\n\n    return config\n\n\ndef get_sample_conf(config):\n\n    import logging\n\n    \"\"\"\n    Check that the sample config file is well formatted and has the required fields, as well as whether\n    the sample analysis label to make the report for has the corresponding entries in such file\n    \"\"\"\n    sample_config_dict_fp = config['sample_config_fp']\n    sample_analysis_label = config['sample_analysis_label']\n\n    # the path exists\n    if not os.path.isfile(sample_config_dict_fp):\n        logging.error('I cannot find the conf file {0}'.format(sample_config_dict_fp))\n        sys.exit(-1)\n\n    # the config file is json format\n    with open(sample_config_dict_fp) as json_data:\n        try:\n            sample_config_d = json.load(json_data)\n        except:\n            logging.error('The sample config file {0} cannot be read as a dictionary'.format(sample_config_dict_fp))\n            sys.exit(-1)\n\n    # the sample analysis identifier has an entry in the config file\n    if sample_analysis_label not in sample_config_d:\n        logging.error('The sample label {0} for the sample-level analysis does not exist in {1}'.format(sample_analysis_label,\n                                                                                                        sample_config_dict_fp))\n        sys.exit(-1)\n\n    # everything ok!\n    config['sample_analysis'] = sample_config_d[sample_analysis_label]\n    logging.debug('Config file {0} ok'.format(sample_config_dict_fp))\n\n    return config\n\n\ndef get_ct_conf(config):\n\n    import logging\n\n    \"\"\"\n    Check that the clinical trials config file is well formatted and has the required fields, as well as whether\n    the clinical trials to make the report for have the corresponding entries in such file\n    \"\"\"\n    ct_config_dict_fp = config['ct_config_fp']\n    clinical_trial_labels = config['clinical_trial_labels']\n\n    # the path exists\n    if not os.path.isfile(ct_config_dict_fp):\n        logging.error('I cannot find the conf file {0}'.format(ct_config_dict_fp))\n        sys.exit(-1)\n\n    # the config file is json format\n    with open(ct_config_dict_fp) as json_data:\n        try:\n            ct_config_d = json.load(json_data)\n        except:\n            logging.error('The clinical trial config file {0} cannot be read as a dictionary'.format(ct_config_dict_fp))\n            sys.exit(-1)\n\n    # check the per clinical trial info\n    for clinical_trial_label in clinical_trial_labels:\n\n        root_required_fields = ['arms']\n        arm_required_fields = ['qualifying_genomics', 'alteration_analysis']\n\n        # check the root required:\n        for field in root_required_fields:\n            if field not in ct_config_d[clinical_trial_label]:\n                logging.error('Field {0} should have an entry in {1} (file {2})'.format(field, clinical_trial_label, ct_config_dict_fp))\n                sys.exit(-1)\n\n        # check the arms required info:\n        for arm in ct_config_d[clinical_trial_label]['arms']:\n            for field in arm_required_fields:\n                if field not in ct_config_d[clinical_trial_label]['arms'][arm]:\n                    logging.error('A \"{0}\" field needs to be included in the {1} entry, arm {2} (file {3})'.\n                                  format(field, clinical_trial_label, arm, ct_config_dict_fp))\n                    sys.exit(-1)\n\n    # check that the qualifying genomics are for correct gene names\n    toi_fp = config['data_paths']['transcript_info']['selected_transcripts']\n    toi_df = pd.read_csv(toi_fp, sep='\\t')\n    goi = toi_df['HGNC symbol'].unique()\n    for clinical_trial_label in clinical_trial_labels:\n        for arm in ct_config_d[clinical_trial_label]['arms']:\n            for qualifying_genomics in ct_config_d[clinical_trial_label]['arms'][arm]['qualifying_genomics']:\n                if ':' in qualifying_genomics:\n                    gene = qualifying_genomics.split(':')[0]\n                    if gene not in goi:\n                        logging.error('Gene {0} stated in ct {1} arm {2} does not exist in canonical transcript file {3}'.format(gene, clinical_trial_label, arm, toi_fp))\n\n    # everything ok!\n    config['ct_analysis'] = ct_config_d\n    logging.debug('Config file {0} ok'.format(ct_config_dict_fp))\n\n    return config\n\n\n# add version details as parsed from a conf file to an output dict structure\ndef add_pipeline_details(config, out_json):\n\n    import logging\n\n    # execution date\n    now = datetime.datetime.now()\n    out_json[ct.out['execution_date']] = now.strftime(\"%Y-%m-%d\")\n\n    # version\n\n    pipeline_json_fp = config['pipeline_version_fp']\n\n    # the path exists\n    if not os.path.isfile(pipeline_json_fp):\n        logging.warning('I cannot find the conf file {0}'.format(pipeline_json_fp))\n        return out_json\n\n    # the config file is valid json format\n    with open(pipeline_json_fp) as json_data:\n        try:\n            version_config_d = json.load(json_data)\n        except:\n            logging.warning(\n                'The pipeline version config file {0} cannot be read as a dictionary'.format(pipeline_json_fp))\n            return out_json\n\n    latest_version = version_config_d['latest_version']\n    version_details_d = version_config_d['version_details'][latest_version]\n    data_release_d = version_config_d['data_releases'][version_details_d['data_release']]\n\n    out_json[ct.out['pipeline_version_details_label']] = {'version': latest_version,\n                                                          'details': version_details_d,\n                                                          'data_release': data_release_d}\n\n    return out_json\n\n\n\n","sub_path":"src/utils/utils_lib.py","file_name":"utils_lib.py","file_ext":"py","file_size_in_byte":12778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
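The oncotree-climbing logic in `get_tumor_parent` and `get_broad_cancer_type` above reduces to walking up a parent map until a target level is reached. A self-contained sketch with a hypothetical miniature taxonomy (the real data comes from the project's JSON file):

```python
# code: (level, parent) -- hypothetical miniature taxonomy
taxonomy = {
    "TISSUE": (0, None),
    "LUNG": (1, "TISSUE"),
    "NSCLC": (2, "LUNG"),
    "LUAD": (3, "NSCLC"),
}

def climb_to_level(code, level_end):
    # Follow parent links until the requested level is reached.
    level, parent = taxonomy[code]
    while level > level_end:
        code = parent
        level, parent = taxonomy[code]
    return code

print(climb_to_level("LUAD", 2))  # -> NSCLC
```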
+{"seq_id":"53897594","text":"import bluetooth\nimport time\nimport serial\nimport sys\nimport qpt_v2\n\n\nprint('Connecting...')\nbd_addr = \"00:21:13:02:59:1A\"\nport = 1\nsock = bluetooth.BluetoothSocket( bluetooth.RFCOMM )\nsock.connect((bd_addr, port))\nprint ('Connected')\nsock.settimeout(20)\n\ndef read_value():\n #sock.send('1')\n char = sock.recv(1).decode()\n return int(char)\n \ndef joystick_control():\n while True:\n value = read_value()\n if(value == 1):\n qpt_v2.move(qpt_v2.up_ccw_msg)\n elif(value == 2):\n qpt_v2.move(qpt_v2.up_cw_msg)\n elif(value == 3):\n qpt_v2.move(qpt_v2.down_ccw_msg)\n elif(value == 4):\n qpt_v2.move(qpt_v2.down_cw_msg)\n elif(value == 5):\n qpt_v2.move(qpt_v2.ccw_msg)\n elif(value == 6):\n qpt_v2.move(qpt_v2.cw_msg)\n elif(value == 7):\n qpt_v2.move(qpt_v2.up_msg)\n elif(value == 8):\n qpt_v2.move(qpt_v2.down_msg)\n elif(value == 9):\n print ('At: {}'.format(qpt_v2.get_degrees('default')))\n else:\n qpt_v2.stop_move()\n \n\ndef quit_option():\n option = input()\n if(option == 'q' or option == 'Q'):\n print('Program Terminated')\n value[0] = 1\n exit()\n \ndef main(): \n joystick_control()\n \n\nif __name__ == '__main__':\n main()\n \n","sub_path":"Master Files/Dish-Tracker-master-885e8c34e3964a0a510c6822bf6adbcceabbdc97/Code/joystick_control.py","file_name":"joystick_control.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"514723854","text":"#\r\n#-----------\r\n#\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom ariadne import *\r\nfrom statistics import mean\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\n\r\n\r\ndef print_function():\r\n \r\n # Print all the function in the screen\r\n\r\n print(\"1. x*x-2\")\r\n print(\"2. x*x*x-2\")\r\n print(\"3. (x-3)*(x-3)*(x+1)-2\")\r\n print(\"4. (x-1)*(x+2)*(x+2)*(x+2)*(x-2)*(x-2)-1\")\r\n\r\ndef intialization_method():\r\n \r\n # Print all the method in the screen\r\n \r\n print(\"1. General Newton Method\")\r\n print(\"2. General Newton Method with Initial estimator by using sign\")\r\n print(\"3. General Newton Method with Initial estimator by using contractor\")\r\n print(\"4. General Newton Method with Initial estimator by using sign method and second derivatives\")\r\n print(\"5. General Newton Method with Initial estimator by using contractor method and second derivatives\")\r\n\r\n\r\ndef select_function(function_number, x):\r\n \r\n \"\"\"\r\n Input:\r\n function_number -- choice of function assigned to number,\r\n x -- function variable is x\r\n Output:\r\n f -- function chosen with variable x\r\n \"\"\"\r\n\r\n if function_number == 1:\r\n f = x * x - 2\r\n elif function_number == 2:\r\n #f = x * x * x * x + x * x * x #X^4-2 and x^4-12 ;X^4+X^3\r\n f = pow(x, 3) - 2\r\n elif function_number == 3:\r\n f = (x - 3) * (x - 3) * (x + 1) - 2\r\n #(x - 1) * (x - 1) * (x - 1) + (x - 4) * (x - 4) - 8\r\n else:\r\n f = (x - 1) * (x + 2) * (x + 2) * (x + 2) * (x - 2) * (x - 2) - 1\r\n \r\n return f\r\n\r\ndef get_label(function_number):\r\n \r\n # use to display label in the graph\r\n \r\n if function_number == 1:\r\n return \"x * x - 2\"\r\n elif function_number == 2:\r\n return \"x*x*x-2\"\r\n elif function_number == 3:\r\n return \"(x-3)*(x-3)*(x+1)-2\"\r\n else:\r\n return \"(x-1)*(x+2)*(x+2)*(x+2)*(x-2)*(x-2)-1\"\r\n \r\n return f\r\n\r\ndef make_interval(bnd1, bnd2):\r\n \r\n \"\"\"\r\n Make an interval [bnd1, bnd2] for the contractor method.\r\n Input:\r\n bnd1 and bnd2 -- two separate single values\r\n Output:\r\n x -- single value with interval [bnd1, bnd2] or [bnd2, bnd1]\r\n \"\"\"\r\n \r\n bnd1 = str(bnd1)\r\n bnd2 = str(bnd2)\r\n dpr=DoublePrecision()\r\n if bnd1 < bnd2:\r\n x=FloatDPBounds(Decimal(bnd1),Decimal(bnd2),dpr)\r\n else:\r\n x=FloatDPBounds(Decimal(bnd2),Decimal(bnd1),dpr)\r\n \r\n #print(\"interval: \",x)\r\n return x\r\n\r\ndef get_estimator_contractor(f, x, step, k1):\r\n \r\n \"\"\"\r\n Contracting method is done by evalutating the sign of interval,\r\n the input x is the starting point of the newton step to x_new.\r\n The fr interval is constructed by [x, x_new], and the function is\r\n evaluated for all values in this interval. If the interval is strictly\r\n positive or strictly negative no root has been found, so the program will\r\n proceed to the next iteration from initial x with value of k doubled.\r\n The program will break if there is a sign difference found in the interval.\r\n The output is the contracted interval with proof of root. 
In our implementation,\r\n we set the loop for maximum 100 iterations.\r\n -------------------------------------------------------------------\r\n Input:\r\n f -- function chosen with variable x\r\n x_init -- single point taken from inital input\r\n step -- counter of newton steps performed, initially step=0\r\n k -- k is a parameter, which helps to find the next bound quickly\r\n ----------------------------------------------------------------------\r\n Output:\r\n x_prev -- lowerbound of the interval\r\n x_new -- upperbound of interval with proof of root\r\n step -- total number of iteration need to find perfect x_new\r\n \"\"\"\r\n fx=f(x)\r\n dfx=derivative(f,x)\r\n \r\n if decide(dfx==0):\r\n print(\"Zero derivative. No solution found in IE contractor.For x=\",x)\r\n return x,x,0\r\n\r\n h = fx /dfx\r\n k_step = k1\r\n xp=x\r\n for j in range(1, 100):\r\n step = step + 1\r\n x_new = x - k_step * h\r\n #print(\"xp x_new=\",xp,x_new)\r\n fr =f(make_interval(xp, x_new))\r\n k_step = k_step * 2\r\n if not (definitely((fr) >= 0) | definitely((fr) <= 0)):\r\n return xp, x_new, step\r\n else:\r\n xp=x_new\r\n\r\n print(\"Limit need to Increass\")\r\n return x,x,-1\r\n\r\n\r\ndef newton_method(f, x, Ep, step, rootdisplay):\r\n \r\n \"\"\"\r\n The newton method is used for finding the exact root from the interval\r\n constructed by previous defined estimator program (contractor, sign).\r\n The newton method will iterate in this interval until it is sufficiently\r\n close to the true root from single bound of interval. This is achieved\r\n when f(x)/f'(x) is smaller or equal to epsilon. The step to achieve this\r\n are counted and outputed, to be added to the steps counted in the estimator program.\r\n Input:\r\n f -- function chosen with variable x\r\n x -- input variable\r\n Ep -- Epsilon (= .00001)\r\n step -- counter of newton step's, initially step=0\r\n rootdisplay -- if we want to see the true root\r\n Output:\r\n step -- return the total number of newton step's are required to reach to closer to true root\r\n \"\"\"\r\n \r\n \r\n while True:\r\n step = step + 1\r\n fx=f(x)\r\n dfx= derivative(f, x)\r\n if decide(dfx==0):\r\n print(\"Zero derivative. No solution found in NM.For x=\",x)\r\n return 0\r\n h = fx / dfx\r\n x = x - h\r\n if (decide(abs(h) <= Ep)):\r\n break\r\n if rootdisplay:\r\n print(\"Root in Approximation: \", x)\r\n return step\r\n\r\n\r\ndef get_estimator_sign_second(f, x, step_initial,k):\r\n \r\n fx = f(x)\r\n sign1 = decide(fx > 0) and 1 or -1\r\n k_step = k\r\n deg=2\r\n dfx=differential(f,x,deg)\r\n if decide(dfx[(2,)]==0):\r\n print(\"Zero derivative. 
No solution found in IE(second derivative)sign method.For x=\",x)\r\n return x,x,0\r\n #h = FloatDPApproximation(dfx[(1,)])/ (2*FloatDPApproximation(dfx[(2,)]))\r\n #h = fx/ (2*FloatDPApproximation(dfx[(2,)]))\r\n #print(\"x fx dfx h \",x,fx,dfx[(2,)], h)\r\n fd1=FloatDPApproximation(dfx[(1,)])\r\n fd2=2*FloatDPApproximation(dfx[(2,)])\r\n#print(\"x fx fd1 fd2 :=\",x,fx,fd1,fd2)\r\n#df1s=fd1*fd1\r\n#print(\"fd1:=\",df1s)\r\n#b=(2*fd2*fx)\r\n#print(\"b:=\",b)\r\n sq=sqrt(abs((fd1*fd1)-(2*fd2*fx)))\r\n print(\"sq:=\",sq)\r\n h=(sq-fd1)/fd2\r\n print(\"h:=\",h)\r\n xp=x\r\n step = step_initial\r\n for j in range(1,10):\r\n #print(\"j\",j)\r\n step = step + 1\r\n x_new = x + h * k_step\r\n k_step = k_step * 2 # make the k double in each iteration\r\n fx_new = f(x_new)\r\n sign2 = decide(fx_new > 0) and 1 or -1\r\n if not (sign1 == sign2):\r\n return xp,x_new, step\r\n else:\r\n xp=x_new\r\n\r\n #print(\"j sign1 sign2\",j,sign1,sign2)\r\n#h = fx/ (2*FloatDPApproximation(dfx[(2,)]))\r\n #print(\"x fx dfx h \",x,fx,dfx[(2,)], h)\r\n#xp=x\r\n#step = step_initial\r\n#for j in range(1,10):\r\n #print(\"j\",j)\r\n #step = step + 1\r\n #x_new = x - k_step * h\r\n #k_step = k_step * 2 # make the k double in each iteration\r\n #fx_new = f(x_new)\r\n #sign2 = decide(fx_new > 0) and 1 or -1\r\n #if not (sign1 == sign2):\r\n # return xp,x_new, step\r\n #else:\r\n #xp=x_new\r\n\r\n print(\"limit need to Increase\")\r\n return x,x,-1\r\n\r\n\r\n\r\ndef get_estimator_sign_second_con(f,x, step_initial,k):\r\n \r\n fx = f(x)\r\n sign1 = decide(fx > 0) and 1 or -1\r\n k_step = k\r\n deg=2\r\n dfx=differential(f,x,deg)\r\n if decide(dfx[(2,)]==0):\r\n print(\"Zero derivative. No solution found in IE(second derivative)contractor method.For x=\",x)\r\n return x,x,0\r\n \r\n #h = FloatDPApproximation(dfx[(1,)]) / (2*FloatDPApproximation(dfx[(2,)]))\r\n #h = FloatDPApproximation(dfx[(1,)])/ (2*FloatDPApproximation(dfx[(2,)]))\r\n #h = fx/ (2*FloatDPApproximation(dfx[(2,)]))\r\n #print(\"x fx dfx h \",x,fx,dfx[(2,)], h)\r\n fd1=FloatDPApproximation(dfx[(1,)])\r\n fd2=2*FloatDPApproximation(dfx[(2,)])\r\n #print(\"x fx fd1 fd2 :=\",x,fx,fd1,fd2)\r\n #df1s=fd1*fd1\r\n #print(\"fd1:=\",df1s)\r\n #b=(2*fd2*fx)\r\n #print(\"b:=\",b)\r\n sq=sqrt(abs((fd1*fd1)-(2*fd2*fx)))\r\n print(\"sq in con :=\",sq)\r\n h=(sq-fd1)/fd2\r\n print(\"h:=\",h)\r\n\r\n xp=x\r\n step = step_initial\r\n for j in range(1,10):\r\n step = step + 1\r\n x_new = x - k_step * h\r\n k_step = k_step * 2 # make the k double in each iteration\r\n fx_new = f(make_interval(xp, x_new))\r\n sign2 = decide(fx_new > 0) and 1 or -1\r\n if not (sign1 == sign2):\r\n return xp,x_new, step\r\n else:\r\n xp=x_new\r\n\r\n print(\"limit need to Increase\")\r\n return x,x,-1\r\n\r\n\r\ndef get_estimator_sign(f, x, step,k):\r\n \r\n \"\"\"\r\n Single point taken as initial input (x) and then evaluate it,(f(x)).\r\n By the evaluation, we try to find the sign of the function at the input point,\r\n The sign of input point is stored in the variable sign1.\r\n We try to find a point x_new, when evaluated (f(x_new) and sign stored in sign2) has a different sign compared to sign1.\r\n x_new is calculated by x_new=x-k*h with h=f(x)/f'(x). Initially, k=2. 
If sign1 and sign2 are the same, then the value of x_new is replaced by x-k*h,\r\n    where the value of k is doubled.\r\n    The iteration will break if sign1 and sign2 differ or the maximum k is reached.\r\n    In our implementation, we let the loop run for at most 100 iterations.\r\n    ------------------------------------------------------------------\r\n    Input:\r\n    f -- function chosen with variable x\r\n    x -- single point taken from initial input\r\n    step -- counter of newton steps performed, initially step=0\r\n    k -- k is a parameter, which helps to find the next bound quickly\r\n    ----------------------------------------------------------------------\r\n    Output:\r\n    x -- the initial input\r\n    x_new -- upperbound of interval with proof of root\r\n    step -- total number of iterations needed to find a suitable x_new\r\n    \"\"\"\r\n    \r\n    fx = f(x)\r\n    sign1 = decide(fx > 0) and 1 or -1\r\n    k_step = k\r\n    dfx=derivative(f, x)\r\n    #print(\"the dfx\",dfx)\r\n    if decide(dfx==0):\r\n        print(\"Zero derivative. No solution found in IE sign method. For x=\",x)\r\n        return x,x,0\r\n    h = fx /dfx\r\n    xp=x\r\n    for j in range(1, 100):\r\n        step = step + 1\r\n        x_new = x - k_step * h\r\n        k_step = k_step * 2 # make the k double in each iteration\r\n        fx_new = f(x_new)\r\n        sign2 = decide(fx_new > 0) and 1 or -1\r\n        if not (sign1 == sign2):\r\n            return xp,x_new, step\r\n        else:\r\n            xp=x_new\r\n\r\n    print(\"Limit needs to be increased\")\r\n    return x,x,-1\r\n\r\ndef askbool(message):\r\n\r\n    # ask a yes/no question and interpret the answer as a boolean\r\n    \r\n    a = input(message)\r\n    if a != \"\":\r\n        if a.lower() in [\"y\", \"yes\", \"t\", \"tr\", \"true\", \"1\"]:\r\n            return True\r\n    return False\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    \"\"\"\r\n    Input:\r\n    function_number -- User defined function\r\n    total_method -- Total methods used\r\n    input_type_single -- single input by user\r\n    input_r -- Interval range\r\n    k -- value of k\r\n    --------------------------------------------------------------------------------------------------------\r\n    Ep -- Epsilon, a small positive tolerance (type FloatDPApproximation)\r\n    f -- User selected function\r\n    x -- independent variable\r\n    bound1 -- The first bound for the initial interval, which is calculated from Input_x\r\n    bound2 -- The second bound, which is the return of the initial_estimator function\r\n    counter -- a variable used for counting the iterations.\r\n    -----------------------------------------------------------------------------------------------------------\r\n    step_initial_estimator -- The total steps needed to find the initial estimator\r\n    step_newton_method -- The total steps with the general Newton method\r\n    step_newton_method_estimator -- The total steps with the Newton method using the initial estimator as pre-processor\r\n    step_newton_method_bidirectional -- The total steps with the bidirectional Newton method using the initial estimator as pre-processor\r\n    ---------------------------------------------------------------------------------------------------------------\r\n    Output:\r\n    Display the total steps to find the solution of the function with the general Newton method.\r\n    Display the total steps to find the solution of the function by using the Newton method and the initial estimator.\r\n    Display the total steps to find the solution of the function by using the bidirectional Newton method and the initial estimator.\r\n    The program is a user-interactive interface, so the user can input their own preferences.\r\n    First the user is asked to choose a one dimensional function that the program will use to validate a root.\r\n    Next the
program will display the available methods for solving the rootfinding problem, and the user can\r\n choose how many it may use. After choosing a selection of the methods the program will ask for a single input\r\n or input range. A single input will initialize the program from this point, an interval will initialize for\r\n every integer value in the interval. When the user choose the interval option, the interval needs to be specified.\r\n Next, the value of k is asked to the user. The results are shown for the methods the user choose,\r\n number of iterations and root. Also for interval input, a plot will be made where the count of loop iterations is displayed.\r\n \"\"\"\r\n \r\n \r\n generel_nm = []\r\n input_range = []\r\n initialestimator_nm = []\r\n initialestimator_nm_con = []\r\n second_nm = []\r\n second_nm_con = []\r\n all_method = []\r\n t_start=0\r\n t_end=0\r\n k_list=[]\r\n generel_nm_time = []\r\n initialestimator_nm_time = []\r\n initialestimator_nm_con_time = []\r\n second_nm_time = []\r\n second_nm_con_time = []\r\n mean_step_ge=[]\r\n mean_step_ie=[]\r\n mean_step_ie_con=[]\r\n mean_step_ie_second=[]\r\n mean_step_ie_second_con=[]\r\n mean_step_ge_time=[]\r\n mean_step_ie_time=[]\r\n mean_step_ie_con_time=[]\r\n mean_step_ie_second_time=[]\r\n mean_step_ie_second_con_time=[]\r\n initialestimator_nm_one_list =[]\r\n initialestimator_nm_con_one_list =[]\r\n second_nm_one_list =[]\r\n second_nm_con_one_list =[]\r\n \r\n #temp=FloatDPBounds(Decimal(1.2),DoublePrecision())\r\n #print(\"temp \",temp.upper()-temp.lower())\r\n # To get the value of Epsilon\r\n #pr = DoublePrecision()\r\n #epp=FloatDP.eps(pr)\r\n pr = DoublePrecision()\r\n Ep = FloatDP.eps(pr)\r\n #print(\"type ep\",type(Ep),Ep)\r\n Ep = FloatDPApproximation(Ep)\r\n \r\n print(\"\")\r\n print(\"Welcome...\")\r\n print(\"Testing Section active\")\r\n print(\"\")\r\n \r\n # initialization with Some functions\r\n print_function()\r\n function_number = int(input(\"Select an equation (1 to 4):\"))\r\n \r\n # Find our Selected function\r\n x = EffectiveScalarUnivariateFunction.identity()\r\n f = select_function(function_number, x)\r\n print(\"Function: \", f)\r\n \r\n intialization_method()\r\n \r\n total_method = int(input(\"How many method you want to use (1 to 5)?:\"))\r\n \r\n for m in range(total_method):\r\n all_method.append(int(input(\"Method:\")))\r\n\r\n input_type_single = askbool(\"Are you want to use single input? 
[default: range input] \")\r\n\r\n if not (input_type_single):\r\n input_r= int(input(\"Input range of the approximation root X (-X,+X): \"))\r\n #k = float(input(\"The value of k:\"))\r\n kr=0\r\n for k1 in range(kr, kr+5):\r\n k=k1+0.65 #0.85,0.75 & 0.95\r\n k_list.append(k)\r\n interval = 1\r\n initialestimator_nm_one =0\r\n initialestimator_nm_con_one =0\r\n second_nm_one =0\r\n second_nm_con_one =0\r\n \r\n for a in np.arange(-input_r, input_r+1,0.1):\r\n #a=a1+a1/10\r\n print(\"a:=\",a)\r\n input_x = interval * a\r\n input_range.append(input_x)\r\n x = FloatDPApproximation(input_x)\r\n counter = 0\r\n rootdisplay = 0\r\n \r\n #hh=sqrt(x)\r\n #print(\"type of hh=\",type(hh),hh)\r\n \r\n if not (decide(f(x) == 0)):\r\n for j in range(len(all_method)):\r\n if all_method[j] == 1:\r\n t_start=time.process_time()\r\n step_general_nm = newton_method(f, x, Ep, counter, rootdisplay)\r\n t_end=time.process_time()\r\n if step_general_nm==0:\r\n generel_nm_time.append(0)\r\n else:\r\n generel_nm_time.append(t_end-t_start)\r\n generel_nm.append(step_general_nm)\r\n for j in range(len(all_method)):\r\n if all_method[j] == 2:\r\n t_start=time.process_time()\r\n bound1, bound2, step = get_estimator_sign(f, x, counter, k)\r\n t_end=time.process_time()\r\n if not decide(bound1==bound2):\r\n bound1=(bound1+bound2)/2\r\n step_nm = newton_method(f, bound1, Ep, counter, rootdisplay)\r\n initialestimator_nm_time.append(t_end-t_start)\r\n initialestimator_nm.append(step)\r\n if step==1:\r\n #print(\"b1 b2\",bound1,bound2)\r\n initialestimator_nm_one = initialestimator_nm_one+1\r\n else:\r\n initialestimator_nm_time.append(0)\r\n initialestimator_nm.append(0)\r\n for j in range(len(all_method)):\r\n if all_method[j] == 3:\r\n t_start=time.process_time()\r\n bound1_con, bound2_con, step_con = get_estimator_contractor(f, x, counter, k)\r\n t_end=time.process_time()\r\n if not decide(bound1_con==bound2_con):\r\n bound1_con=(bound1_con+bound2_con)/2\r\n step_nm_con = newton_method(f, bound1_con, Ep, counter, rootdisplay)\r\n t_end=time.process_time()\r\n initialestimator_nm_con_time.append(t_end-t_start)\r\n initialestimator_nm_con.append(step_con)\r\n if step_con==1:\r\n initialestimator_nm_con_one = initialestimator_nm_con_one + 1\r\n else:\r\n initialestimator_nm_con_time.append(0)\r\n initialestimator_nm_con.append(0)\r\n for j in range(len(all_method)):\r\n if all_method[j] == 4:\r\n t_start=time.process_time()\r\n bound1_second, bound2_second, step_second= get_estimator_sign_second(f, x, counter, k)\r\n t_end=time.process_time()\r\n if not decide(bound1_second==bound2_second):\r\n bound1_second=(bound1_second+bound2_second)/2\r\n step_ge_sign_second= newton_method(f,bound1_second, Ep, counter, rootdisplay)\r\n #t_end=time.process_time()\r\n second_nm_time.append(t_end-t_start)\r\n second_nm.append(step_second)\r\n if step_second==1:\r\n second_nm_one =second_nm_one+1\r\n else:\r\n second_nm_time.append(0)\r\n second_nm.append(0)\r\n for j in range(len(all_method)):\r\n if all_method[j] == 5:\r\n t_start=time.process_time()\r\n bound1_second_con, bound2_second_con, step_second_con= get_estimator_sign_second_con(f, x, counter, k)\r\n t_end=time.process_time()\r\n if not decide(bound1_second_con==bound2_second_con):\r\n #print(\"b1 b2\",bound1_second_con,bound2_second_con)\r\n bound1_second_con=(bound1_second_con+bound2_second_con)/2\r\n #print(\"After b1\",bound1_second_con)\r\n \r\n step_ge_second_con = newton_method(f,bound1_second_con, Ep, counter, rootdisplay)\r\n \r\n 
second_nm_con_time.append(t_end-t_start)\r\n second_nm_con.append(step_second_con)\r\n if step_second_con == 1:\r\n second_nm_con_one = second_nm_con_one + 1\r\n else:\r\n second_nm_con_time.append(0)\r\n second_nm_con.append(0)\r\n\r\n\r\n #print(\"T\",np.array(generel_nm)[np.nonzero(np.array(generel_nm))].mean())\r\n print(\"one\",initialestimator_nm_one,initialestimator_nm_con_one,second_nm_one,second_nm_con_one)\r\n initialestimator_nm_one_list.append(initialestimator_nm_one)\r\n initialestimator_nm_con_one_list.append(initialestimator_nm_con_one)\r\n second_nm_one_list.append(second_nm_one)\r\n second_nm_con_one_list.append(second_nm_con_one)\r\n mean_step_ge.append(np.array(generel_nm)[np.nonzero(np.array(generel_nm))].mean())\r\n mean_step_ie.append(np.array(initialestimator_nm)[np.nonzero(np.array(initialestimator_nm))].mean())\r\n mean_step_ie_con.append(np.array(initialestimator_nm_con)[np.nonzero(np.array(initialestimator_nm_con))].mean())\r\n mean_step_ie_second.append(np.array(second_nm)[np.nonzero(np.array(second_nm))].mean())\r\n mean_step_ie_second_con.append(np.array(second_nm_con)[np.nonzero(np.array(second_nm_con))].mean())\r\n #print(\"time:=\",np.array(generel_nm_time)[np.nonzero(np.array(generel_nm_time))].mean())\r\n mean_step_ge_time.append(np.array(generel_nm_time)[np.nonzero(np.array(generel_nm_time))].mean())\r\n mean_step_ie_time.append(np.array(initialestimator_nm_time)[np.nonzero(np.array(initialestimator_nm_time))].mean())\r\n mean_step_ie_con_time.append(np.array(initialestimator_nm_con_time)[np.nonzero(np.array(initialestimator_nm_con_time))].mean())\r\n mean_step_ie_second_time.append(np.array(second_nm_time)[np.nonzero(np.array(second_nm_time))].mean())\r\n mean_step_ie_second_con_time.append(np.array(second_nm_con_time)[np.nonzero(np.array(second_nm_con_time))].mean())\r\n \r\n \r\n N=5\r\n ind=np.arange(N)\r\n width=0.12\r\n fig=plt.figure()\r\n r1=plt.bar(ind,mean_step_ge,width,color='r')\r\n r2=plt.bar(ind+width,mean_step_ie,width,color='g')\r\n r3=plt.bar(ind+width*2,mean_step_ie_con,width,color='b')\r\n r4=plt.bar(ind+width*3,mean_step_ie_second,width,color='y')\r\n r5=plt.bar(ind+width*4,mean_step_ie_second_con,width,color='m')\r\n plt.xticks([r+width*2 for r in range(N)],k_list)\r\n titlelabels = \"Function: {}\".format(get_label(function_number))\r\n plt.title(titlelabels)\r\n plt.ylabel(\"Number of Average Steps\",fontsize=20)\r\n plt.xlabel(\"k\",fontsize=20)\r\n plt.legend((r2[0],r3[0],r4[0],r5[0]),('NM_IE_SIGN','NM_IE_CON','NM_IE_SIGN_TAYLOR','NM_IE_CON_TAYLOR'))\r\n plt.show()\r\n\r\n r1=plt.bar(ind,mean_step_ge_time,width,color='r')\r\n r2=plt.bar(ind+width,mean_step_ie_time,width,color='g')\r\n r3=plt.bar(ind+width*2,mean_step_ie_con_time,width,color='b')\r\n r4=plt.bar(ind+width*3,mean_step_ie_second_time,width,color='y')\r\n r5=plt.bar(ind+width*4,mean_step_ie_second_con_time,width,color='m')\r\n plt.xticks([r+width*2 for r in range(N)],k_list)\r\n titlelabels = \"Function: {}\".format(get_label(function_number))\r\n plt.title(titlelabels)\r\n plt.ylabel(\"Number of Average time(nanoseconds)\",fontsize=20)\r\n plt.xlabel(\"k\",fontsize=20)\r\n plt.legend((r2[0],r3[0],r4[0],r5[0]),('NM_IE_SIGN','NM_IE_CON','NM_IE_SIGN_TAYLOR','NM_IE_CON_TAYLOR'))\r\n plt.show()\r\n\r\n\r\n else:\r\n \r\n input_x = float(input(\"Input a approximation to root: \"))\r\n k = float(input(\"The value of k:\"))\r\n x = FloatDPApproximation(input_x)\r\n counter = 0\r\n rootdisplay = 1\r\n \r\n if not (decide(f(x) == 0)):\r\n for j in range(len(all_method)):\r\n 
if all_method[j] == 1:\r\n t_start=time.process_time()\r\n step_general_nm = newton_method(f, x, Ep, counter, rootdisplay)\r\n print(\"The total steps in without initial estimator(\", input_x, \"):= \", step_general_nm)\r\n t_end=time.process_time()\r\n print(\"The total TIME in without initial estimator(\", input_x, \"):= \",t_end-t_start)\r\n \r\n for j in range(len(all_method)):\r\n if all_method[j] == 2:\r\n t_start=time.process_time()\r\n bound1, bound2, step = get_estimator_sign(f, x, counter, k)\r\n if not(decide(bound1==bound2)):\r\n step_nm = newton_method(f, bound2, Ep, counter, rootdisplay)\r\n print(\"The total steps in with initial estimator sign method(\", bound2, \"):= \", step_nm + step)\r\n t_end=time.process_time()\r\n print(\"The total TIME in with initial estimator sign method(\", bound2, \"):= \",t_end-t_start)\r\n \r\n for j in range(len(all_method)):\r\n if all_method[j] == 3:\r\n t_start=time.process_time()\r\n bound1_con, bound2_con, step_con = get_estimator_contractor(f, x, counter, k)\r\n if not(decide(bound1_con==bound2_con)):\r\n step_nm_con = newton_method(f, bound2_con, Ep, counter, rootdisplay)\r\n t_end=time.process_time()\r\n print(\"The total steps in with initial estimator contractor method(\", bound2_con, \"):= \",step_nm_con + step_con)\r\n print(\"The total TIME in with initial estimator contractor method(\", bound2_con, \"):= \",t_end-t_start)\r\n \r\n for j in range(len(all_method)):\r\n if all_method[j] == 4:\r\n t_start=time.process_time()\r\n bound1_second, bound2_second, step_second= get_estimator_sign_second(f, x, counter, k)\r\n if not(decide(bound1_second==bound2_second)):\r\n step_ge_sign_second= newton_method(f,bound2_second, Ep, counter, rootdisplay)\r\n t_end=time.process_time()\r\n print(\"The total steps in with initial estimator sign method(second derivatives)(\", bound2_second, \"):= \",step_ge_sign_second + step_second)\r\n print(\"The total time in with initial estimator sign method(second derivatives)(\", bound2_second, \"):= \",t_end-t_start)\r\n \r\n for j in range(len(all_method)):\r\n if all_method[j] == 5:\r\n t_start=time.process_time()\r\n bound1_second_con, bound2_second_con, step_second_con= get_estimator_sign_second_con(f, x, counter, k)\r\n if not(decide(bound1_second_con==bound2_second_con)):\r\n step_ge_second_con = newton_method(f,bound2_second_con, Ep, counter, rootdisplay)\r\n t_end=time.process_time()\r\n print(\"The total steps in contractor with initial estimator contractor method(second derivatives)(\",bound2_second_con, \"):= \",\r\n step_ge_second_con + step_second_con)\r\n print(\"The total time in with initial estimator contractor method(second derivatives)(\", bound2_second_con, \"):= \",t_end-t_start)\r\n","sub_path":"nm_test02.py","file_name":"nm_test02.py","file_ext":"py","file_size_in_byte":29479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
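All five variants in `nm_test02.py` above are built around the same classical Newton update x ← x − f(x)/f′(x); the estimators only pick a better starting bracket first. A minimal float-only sketch of the core iteration, without the Ariadne interval machinery:

```python
def newton(f, df, x, eps=1e-12, max_iter=100):
    # Iterate x <- x - f(x)/df(x) until the step size drops below eps.
    for _ in range(max_iter):
        h = f(x) / df(x)
        x -= h
        if abs(h) <= eps:
            return x
    raise RuntimeError("Newton iteration did not converge")

print(newton(lambda v: v * v - 2, lambda v: 2 * v, 1.0))  # ~1.4142135623730951
```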
+{"seq_id":"323827024","text":"import gzip\nimport logging\nimport re\nimport threading\nimport time\n\n#\n# OID Example: 0af58541:15affd7e1d7:173:11\noid_pattern = re.compile(\"([0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)\")\n\n#\n# Another OID Example: [qtp1519842251-2260-0af58541:15affd7e1d7:370:21-s3-10.247.201.37]\noid_in_threadname = re.compile(\"\\[\\S+-\\d+-([0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)-\\S+-\\S+\\]\")\n\n#\n# Another OID Example: [TaskScheduler-BlobService-COMMUNICATOR-ParallelExecutor-1187-0af9fab9:15b6ab2a540:1c0d:3ff]\noid_in_threadname2 = re.compile(\"\\[\\S+-\\d+-([0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)\\]\")\n\n#\n# Date Example: 2017-03-24T09:14:36,415\ndate_pattern = re.compile(\"(\\d+-\\d+-\\d+T\\d+:\\d+:\\d+,\\d+)\")\n\n\n#\n# ObjectId: \nobject_id_pattern = re.compile(\"([0-9a-f]{64})\")\n\nlogger = logging.getLogger('parse-ecs-log')\n\n\nclass LineReader(threading.Thread):\n def __init__(self, filename, tracker):\n threading.Thread.__init__(self)\n # File to process\n self.filename = filename\n # Data structure: key-to-count\n self.tracker = tracker\n # Track number of lines processed\n self.line_count = 0\n # Track number of tokens\n self.tokens_processed_count = 0\n\n #\n # Keep track of the dates found in the lines as we process them\n self.last_date = \"\"\n\n #\n # Completion flag\n self.completed = False\n\n def run(self):\n if self.filename.endswith(\".gz\"):\n self.process_gzip_file(self.filename)\n else:\n self.process_regular_file(self.filename)\n\n #\n # Line processor:\n # - For each line, find non-whitespace strings\n # - See if the non-whitespace string matches some patter of\n # data that we're looking for\n # - Save date into merged_tracker data structure\n def process_lines(self, file_object):\n line_number = 0\n track_line_processing_time = 0.0\n for line in file_object:\n self.tracker.add_line()\n line_number += 1\n non_whitespace = re.split('\\s+', line)\n found_date = False\n start = time.clock()\n for token in non_whitespace:\n word = token.strip()\n # Skip empty words\n if len(word) == 0:\n continue\n\n if not found_date:\n looks_like_date = date_pattern.match(word)\n if looks_like_date:\n self.last_date = looks_like_date.group(1)\n found_date = True\n\n matching = oid_pattern.match(word)\n if not matching:\n matching = object_id_pattern.match(word)\n\n if not matching:\n matching = oid_in_threadname.match(word)\n\n if not matching:\n matching = oid_in_threadname2.match(word)\n\n if matching:\n matched_word = matching.group(1)\n self.tracker.associate_file(matched_word, file_object.name)\n self.tracker.associate_line(matched_word, line_number)\n self.tracker.count(matched_word)\n self.tracker.set_last_date(matched_word, self.last_date)\n self.tokens_processed_count += 1\n total_time = time.clock() - start\n track_line_processing_time += total_time\n\n logger.info(\"Average time per line: {:2.6f} secs\".format(track_line_processing_time/line_number))\n logger.info(\"Average tokens per sec: {:2.3f} secs\"\n .format(self.tokens_processed_count / track_line_processing_time))\n self.completed = True\n return line_number\n\n def process_gzip_file(self, fn):\n with gzip.open(fn) as f:\n self.line_count += self.process_lines(f)\n\n def process_regular_file(self, fn):\n with open(fn) as f:\n self.line_count += self.process_lines(f)\n\n def get_processed_lines(self):\n return 
self.line_count\n","sub_path":"src/ParseLog/LineReader.py","file_name":"LineReader.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
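`LineReader` above tries up to four compiled patterns against every token. One pre-compiled alternation scanned over the whole line is a common speed-up; a sketch (single-pass `findall` semantics differ slightly from the per-token `match` calls above):

```python
import re

combined = re.compile(
    r"[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+"  # bare OID
    r"|[0-9a-f]{64}"                            # 64-hex object id
)

line = "2017-03-24T09:14:36,415 handled 0af58541:15affd7e1d7:173:11 ok"
print(combined.findall(line))  # ['0af58541:15affd7e1d7:173:11']
```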
+{"seq_id":"62047456","text":"\"\"\"\r\nDjango settings for friendspeak project.\r\n\r\nFor more information on this file, see\r\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\r\n\r\nFor the full list of settings and their values, see\r\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\r\n\"\"\"\r\n\r\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\r\nimport os\r\nBASE_DIR = os.path.realpath('.')\r\n\r\n\r\n# Quick-start development settings - unsuitable for production\r\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\r\n\r\n# SECURITY WARNING: keep the secret key used in production secret!\r\nSECRET_KEY = '9&4*%lz)7v-q+8+07p265lpt-djk@hdo#%ng%+uj4g7iu1_o1f'\r\n\r\n# SECURITY WARNING: don't run with debug turned on in production!\r\nDEBUG = True\r\n\r\nTEMPLATE_DEBUG = True\r\n\r\nALLOWED_HOSTS = ['friendspeak.io', 'www.friendspeak.io']\r\n\r\n\r\n# Application definition\r\n\r\nINSTALLED_APPS = (\r\n 'django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'django.contrib.humanize',\r\n 'blog',\r\n)\r\n\r\nMIDDLEWARE_CLASSES = (\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n #'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n)\r\n\r\nROOT_URLCONF = 'friendspeak.urls'\r\n\r\nWSGI_APPLICATION = 'wsgi.application'\r\n\r\n\r\n# Database\r\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\r\n\r\nDATABASES = {\r\n 'default': {\r\n 'ENGINE': 'django.db.backends.mysql',\r\n 'NAME': 'dbCliT1',\r\n 'USER': 'corbin',\r\n 'PASSWORD': 'poF03jh1',\r\n 'HOST': 'localhost',\r\n 'PORT': '3306',\r\n }\r\n}\r\n\r\n# Internationalization\r\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\r\n\r\nLANGUAGE_CODE = 'en-us'\r\n\r\nTIME_ZONE = 'UTC'\r\n\r\nUSE_I18N = True\r\n\r\nUSE_L10N = True\r\n\r\nUSE_TZ = True\r\n \r\n# Static asset configuration\r\nSTATIC_ROOT = 'staticfiles'\r\nSTATIC_URL = '/static/'\r\n\r\nSTATICFILES_DIRS = (\r\n os.path.join(BASE_DIR, 'static'),\r\n)\r\n\r\nTEMPLATE_DIRS = (\r\n os.path.join(BASE_DIR, \"friendspeak/friendspeak/templates\"),\r\n)\r\n","sub_path":"friendspeak/friendspeak/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"613146261","text":"import uuid\nimport asyncio\nfrom functools import partial\n\nfrom aiojobs.aiohttp import setup, spawn\nimport aiojobs\n\nfrom osuck import config, openstack, ansible\nfrom osuck.keypairs_storage import save_ssh_key, get_ssh_key\n\n\ndef get_apigateway_name() -> str:\n return \"apigateway_%s\" % uuid.uuid4().hex\n\n\ndef prepare_server(session, nova, glance, cinder, neutron, apigw_user_name):\n apigw_internal_name = get_apigateway_name()\n keypair = openstack.create_keypair(nova, apigw_internal_name)\n save_ssh_key(session, keypair)\n image = openstack.get_ubuntu_image(glance)\n flavor = openstack.get_flavor(nova)\n volume_type = openstack.get_volume_type(cinder)\n if image is None or flavor is None or volume_type is None:\n raise OSuckError\n\n volume = openstack.create_bootable_volume(\n client=cinder,\n name=apigw_internal_name,\n image=image,\n size=\"5\",\n volume_type=volume_type,\n )\n return openstack.create_server(nova, apigw_user_name, keypair, volume, flavor)\n\n\nasync def create_server(session, nova, glance, cinder, neutron, apigw_user_name):\n loop = asyncio.get_event_loop()\n server = await loop.run_in_executor(\n None,\n partial(\n prepare_server, session, nova, glance, cinder, neutron, apigw_user_name\n ),\n )\n\n await openstack.wait_for_server_creation(nova, server)\n\n server.add_tag(openstack.APIGATEWAY_TAG)\n\n await asyncio.sleep(0.01)\n\n floating_ip = openstack.attach_floating_ip(neutron, server)\n\n await asyncio.sleep(0.01)\n\n ssh_key = get_ssh_key(session)\n host = floating_ip[\"floatingip\"][\"floating_ip_address\"]\n await openstack.wait_for_ssh_to_be_active(host)\n await ansible.deploy(host, ssh_key)\n\n\nasync def spawn_create_server(\n request, session, nova, glance, cinder, neutron, apigw_user_name\n):\n job = await spawn(\n request, create_server(session, nova, glance, cinder, neutron, apigw_user_name)\n )\n return job\n\n\ndef setup_jobs(app):\n setup(app)\n","sub_path":"osuck/osuck/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"442152341","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Views for the insurance forms.\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom djimix.core.utils import get_connection\nfrom djimix.core.utils import xsql\nfrom djsani.core.sql import STUDENT_VITALS\nfrom djsani.core.utils import get_manager\nfrom djsani.core.utils import get_term\nfrom djsani.insurance.forms import AthleteForm\nfrom djsani.insurance.forms import StudentForm\nfrom djsani.insurance.models import StudentHealthInsurance\nfrom djtools.utils.mail import send_mail\nfrom djtools.utils.users import in_group\n\n\nEARL = settings.INFORMIX_ODBC\n\n\n@login_required\ndef index(request, stype, cid=None):\n \"\"\"Main view for the insurance form.\"\"\"\n medical_staff = False\n user = request.user\n staff = in_group(user, settings.STAFF_GROUP)\n if cid:\n if staff:\n medical_staff = True\n else:\n return HttpResponseRedirect(reverse_lazy('home'))\n else:\n cid = user.id\n\n # get academic term\n term = get_term()\n # get student\n sql = \"\"\" {0}\n WHERE\n id_rec.id = \"{1}\"\n AND stu_serv_rec.yr = \"{2}\"\n AND stu_serv_rec.sess = \"{3}\"\n \"\"\".format(STUDENT_VITALS, cid, term['yr'], term['sess'])\n\n with get_connection(EARL) as connection:\n student = xsql(sql, connection).fetchone()\n\n if not student:\n if medical_staff:\n return HttpResponseRedirect(reverse_lazy('dashboard_home'))\n else:\n return HttpResponseRedirect(reverse_lazy('home'))\n\n # obtain our student medical manager\n manager = get_manager(cid)\n # obtain our health insturance object\n instance = StudentHealthInsurance.objects.using('informix').filter(\n college_id=cid,\n ).filter(\n created_at__gte=settings.START_DATE,\n ).first()\n\n # opt out\n oo = None\n if instance:\n oo = instance.opt_out\n\n # form class\n if stype == 'student':\n form_class = StudentForm\n elif stype == 'athlete':\n form_class = AthleteForm\n\n if request.method == 'POST':\n form = form_class(\n request.POST, request.FILES, manager=manager, instance=instance,\n )\n if form.is_valid():\n insurance = form.save(commit=False)\n insurance.college_id = cid\n insurance.manager_id = manager.id\n insurance.save(using='informix')\n # update the manager\n manager.cc_student_health_insurance = True\n manager.save(using='informix')\n # opt out of insurance\n if insurance.opt_out:\n if manager.sports():\n if not medical_staff:\n # alert email to staff\n if settings.DEBUG:\n to_list = [settings.SERVER_EMAIL]\n else:\n to_list = settings.INSURANCE_RECIPIENTS\n send_mail(\n request,\n to_list,\n \"[Health Insurance] Opt Out: {0} {1} ({2})\".format(\n user.first_name,\n user.last_name,\n cid,\n ),\n user.email,\n 'alert_email.html',\n request,\n )\n if staff:\n redirect = reverse_lazy('student_detail', args=[cid])\n else:\n redirect = reverse_lazy('insurance_success')\n return HttpResponseRedirect(redirect)\n else:\n # form class\n form = form_class(instance=instance, manager=manager)\n\n return render(\n request,\n 'insurance/form.html',\n {\n 'form': form,\n 'oo': oo,\n 'student': student,\n 'medical_staff': medical_staff,\n 'manager': manager,\n 'group_number': settings.INSURANCE_GROUP_NUMBER,\n },\n )\n","sub_path":"djsani/insurance/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"419936475","text":"from requests import get\nfrom errors import KofdataError\nimport constants as const\n\ndef list_keys_in_set(setname):\n\t\n\tif(isinstance(setname, basestring)):\n\t\tsetname = [setname]\n\t\n\turl = const.API_BASE_URL + '/sets/details/{}'\n\t\n\tsets = dict()\n\t\n\tfor s in setname:\n\t\tseturl = url.format(s)\n\t\n\t\tresponse = get(seturl)\n\t\n\t\tif(response.status_code == 200):\n\t\t\tsets[s] = response.json()['keys']\n\t\telse:\n\t\t\traise KofdataError('Could not read from API (status code: {})'.format(response.status_code))\n\t\n\treturn sets","sub_path":"kofdata/kofdata/list_keys_in_set.py","file_name":"list_keys_in_set.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"98324766","text":"import cv2, os, argparse, settings, model\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass DogCatClassifier():\n def __init__(self, args, model):\n self.model = model.ConvNeuralNetwork.model\n self.args = args\n\n def run(self):\n self.model.load('model/{}'.format(settings.MODEL_NAME))\n\n # Will be used as display image for plt\n orig_img = cv2.imread(self.args.image)\n \n # Get b,g,r to convert to r,g,b\n b,g,r = cv2.split(orig_img)\n orig_img = cv2.merge([r,g,b])\n\n cv2_img = cv2.imread(self.args.image, cv2.IMREAD_GRAYSCALE)\n cv2_img = cv2.resize(cv2_img, (settings.IMG_SIZE, settings.IMG_SIZE))\n\n img_data = np.array(cv2_img)\n reshaped_img = img_data.reshape(settings.IMG_SIZE, settings.IMG_SIZE, 1).astype('float32')\n\n # Get prediction\n prediction = self.model.predict([reshaped_img])\n\n print(prediction)\n\n if np.argmax(prediction[0]) == 1:\n label = '{}% Dog'.format(round(prediction[0][1] * 100, 2))\n else:\n label = '{}% Cat'.format(round(prediction[0][0] * 100, 2))\n\n plt.figure(num='Image Classifier')\n plt.imshow(orig_img)\n plt.title(label)\n plt.show()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog=\"Image Classifier\",\n description=\"Dog vs Cat Classifier using CNNs by github.com/rsalunga29\")\n parser.add_argument('-i', '--image', type=str, required=True, help=\"Absolute path to image that needs to be classified.\")\n args = parser.parse_args()\n\n classifier = DogCatClassifier(args, model)\n classifier.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"292710479","text":"students_list = []\n\nclass Student:\n school_name = \"\"\n school_type = \"\"\n\n #\"initialisation\". creating a custom constructor. done when class is called?\n def __init__(self, name, last_name, id = 332):\n self.name = name\n self.last_name = last_name\n self.id = id\n students_list.append(self)\n\n # string method override. called when we print class\n def __str__(self):\n return \"Student \" + self.name + \" \" + self.last_name + \", id \" + str(self.id)\n\n\n def get_name_capitalize(self):\n return self.name.capitalize()\n\n def get_school_name(self):\n return self.school_name\n\n\nclass PrimaryStudent (Student):\n school_name = \"The Hermitage\"\n school_type = \"Primary\"\n\n #\"self\" refering to the current instance of the class being used\n def year_6_sats_results(self, english, maths, science):\n self.english_mark = english\n self.maths_mark = maths\n self.science_mark = science\n students_list.append(self)\n\n\nclass SecondaryStudent(Student):\n school_name = \"Gordon's\"\n school_type = \"Secondary\"\n gcses = []\n\n\n def gcse_results(self):\n print(\"Hello %s! These are the GCSE results we currently have on record for you:\" %self.name)\n print(self.gcses)\n subject_add = input(\"Would you like to add a subject? type yes or no: \")\n while subject_add != 'no':\n self.subject = input(\"Ok. Please enter a subject name: \")\n self.grade = input(\"Great. What grade was achieved for this subject? \")\n self.gcses.append({self.subject: self.grade})\n subject_add = input(\"Would you like to add a subject? type yes or no: \")\n print(\"Ok, so no more subjects to add. Here is what we have on record: \")\n print(self.gcses)\n students_list.append(self.gcses)\n\n\n\n#mark = Student(\"mark\", \"smith\")\n#print(mark)\n#print(mark.last_name)\n#print(mark.id)\n#print(students)\n\n#mark = PrimaryStudent(\"mark\", \"smith\", 123)\n#PrimaryStudent.year_6_sats_results(mark, 4, 5, 6)\n\n#print(mark)\n#print(mark.english_mark)\n\n#mark = SecondaryStudent(\"mark\", \"smith\", 123)\n#SecondaryStudent.gcse_results(mark)\n\n#SecondaryStudent.gcse_results(mark)\n","sub_path":"students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"217100849","text":"#!/usr/bin/python3\nimport time\nimport sys\nimport logging\nimport hahomematic\n\nlogging.basicConfig(level=logging.DEBUG)\nLOG = logging.getLogger(__name__)\n\nSLEEPCOUNTER = 0\nGOT_DEVICES = False\n\ndef systemcallback(src, *args):\n global GOT_DEVICES\n print(\"systemcallback: %s\" % src)\n if src == hahomematic.const.HH_EVENT_NEW_DEVICES:\n print(\"Number of new device descriptions: %i\" % len(args[0]))\n return\n elif src == hahomematic.const.HH_EVENT_DEVICES_CREATED:\n GOT_DEVICES = True\n print(\"All devices:\")\n print(hahomematic.data.HA_DEVICES)\n for _, device in hahomematic.data.HA_DEVICES.items():\n print(device)\n print(\"New devices:\")\n print(args[0])\n print(\"New entities:\")\n print(args[1])\n return\n for arg in args:\n print(\"argument: %s\" % arg)\n\ndef eventcallback(address, interface_id, key, value):\n print(\"eventcallback at %i: %s, %s, %s, %s\" % (int(time.time()), address, interface_id, key, value))\n\ndef entityupdatecallback(entity_id):\n print(\"entityupdatecallback at %i: %s\" % (int(time.time()), entity_id))\n\n\n# Specify a unique name to identify our server.\nhahomematic.config.INTERFACE_ID = \"myserver\"\n# For testing we set a short INIT_TIMEOUT\nhahomematic.config.INIT_TIMEOUT = 10\n# We have to set the locations of stored data so the server can load\n# it while initializing.\nhahomematic.config.FILE_DEVICES = 'ha_devices.json'\nhahomematic.config.FILE_PARAMSETS = 'ha_paramsets.json'\nhahomematic.config.FILE_NAMES = 'ha_names.json'\n# Add callbacks to handle the events and see what happens on the system.\nhahomematic.config.CALLBACK_SYSTEM = systemcallback\nhahomematic.config.CALLBACK_EVENT = eventcallback\nhahomematic.config.CALLBACK_ENTITY_UPDATE = entityupdatecallback\n# Create a server that listens on 127.0.0.1:* and identifies itself as myserver.\nserver = hahomematic.Server()\n\n# Create clients\n# Connect to pydevccu at 127.0.0.1:2001\nclient1 = hahomematic.Client(name=\"localhost\", host=\"127.0.0.1\", port=2001, password='', local_port=server.local_port)\n# Connect to CCU for RF-deices at 192.168.1.173:2001\nclient2 = hahomematic.Client(name=\"rf\", host=\"192.168.1.173\", port=2001, password='', local_port=server.local_port)\n# Connect to CCU for HmIP-deices at 192.168.1.173:2010\nclient3 = hahomematic.Client(name=\"hmip\", host=\"192.168.1.173\", port=2010, password='', local_port=server.local_port)\n\n# Clients have to exist prior to starting the server thread!\nserver.start()\n# Once the server is running we subscribe to receive messages.\nclient1.proxy_init()\nclient2.proxy_init()\nclient3.proxy_init()\n\nwhile not GOT_DEVICES and SLEEPCOUNTER < 20:\n print(\"Waiting for devices\")\n SLEEPCOUNTER += 1\n time.sleep(1)\ntime.sleep(5)\n\nfor i in range(16):\n if i % 4 == 0:\n for client in hahomematic.data.CLIENTS:\n if not hahomematic.data.CLIENTS[client].is_connected():\n LOG.warning(\"Disconnected. Reconnecting for %s\" % client)\n hahomematic.data.CLIENTS[client].proxy_init()\n LOG.debug(\"Sleeping (%i)\", i)\n time.sleep(2)\n# Stop the server thread so Python can exit properly.\nserver.stop()\n\nsys.exit(0)","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"153468772","text":"#!/usr/bin/env python\n\nimport unittest\nfrom unittest.mock import Mock\n\nfrom genie.utils.cisco_collections import *\n\n\nclass test_typed_set(unittest.TestCase):\n\n def test_init(self):\n\n with self.assertRaises(TypeError):\n typedset()\n with self.assertRaises(TypeError):\n typedset(None)\n with self.assertRaises(TypeError):\n typedset('a')\n\n s = typedset(int)\n self.assertEqual(s, set())\n self.assertRegex(repr(s), r'^typedset\\(int\\)$')\n s.add(1)\n s.add(2)\n s.add(3)\n s.add(2)\n s.remove(3)\n self.assertEqual(s, {1, 2})\n self.assertRegex(repr(s), r'^typedset\\(int, \\{\\d+, \\d+\\}\\)$')\n\n s = typedset(int, '')\n self.assertEqual(s, set())\n self.assertRegex(repr(s), r'^typedset\\(int\\)$')\n s.add(1)\n s.add(2)\n s.add(3)\n s.add(2)\n s.remove(3)\n self.assertEqual(s, {1, 2})\n self.assertRegex(repr(s), r'^typedset\\(int, \\{\\d+, \\d+\\}\\)$')\n\n def my_func(value):\n return int(value)\n s = typedset(my_func, '54634')\n self.assertEqual(s, set(int(c) for c in '54634'))\n self.assertRegex(repr(s), r'^typedset\\(.my_func at 0x\\w+>, \\{\\d+, \\d+, \\d+, \\d+\\}\\)$')\n\n with self.assertRaises(ValueError):\n s.add('abc')\n with self.assertRaises(ValueError):\n s.remove('abc')\n\nif __name__ == '__main__':\n unittest.main()\n\n# vim: ft=python et sw=4\n","sub_path":"Kirk Bryers/VENV/sb_venv/lib/python3.9/site-packages/genie/tests/cisco_collections/test_typed_set.py","file_name":"test_typed_set.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
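The tests above pin down the `typedset` contract: the first argument must be callable, members are coerced through it, and bad values surface as `ValueError`. A minimal stand-in that satisfies those expectations might look like the sketch below; the real `genie.utils.cisco_collections` implementation is not shown here, so treat this purely as an illustration.

```python
# Illustrative sketch of a set that coerces members through a callable.
class typedset(set):
    def __init__(self, type_, iterable=()):
        if not callable(type_):
            raise TypeError("type_ must be callable")
        self._type = type_
        super().__init__(type_(v) for v in iterable)

    def add(self, value):
        super().add(self._type(value))      # ValueError propagates for bad input

    def remove(self, value):
        super().remove(self._type(value))

    def __repr__(self):
        # classes print by name, plain callables by their full repr,
        # matching the patterns the tests assert against
        name = self._type.__name__ if isinstance(self._type, type) else repr(self._type)
        return "typedset(%s, %r)" % (name, set(self)) if self else "typedset(%s)" % name

s = typedset(int, "31")
s.add("2")
assert s == {1, 2, 3}
```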
+{"seq_id":"201042335","text":"import cv2\nimport numpy as np\n\nw = 800\nc = int(w/2)\ncanvas = np.ones((w,w,3)) * 255\n\ndef draw(x,y, color, canvas):\n global w\n canvas[int(w/2 + w/2 * -x), int(w/2 + w/2 * y),:] = color\n\n\ncanvas = cv2.circle(canvas, (int(w/2),int(w/2)), 4, (0,0,0),8)\na = (int(w/2),int(w/2))\nb1 = (int(w/2+np.sin(0.5)*w/4), int(w/2+np.cos(0.5)*w/4))\nb2 = (int(w/2-np.sin(0.5)*w/4), int(w/2+np.cos(0.5)*w/4))\ncanvas = cv2.line(canvas, a, b1, (0,0,0),2)\ncanvas = cv2.line(canvas, a, b2, (0,0,0),2)\n\ncanvas2 = canvas.copy()\n\ngoal = np.array([-0.5,0])\n\nj = 0\nwhile j < 1000000:\n target = np.random.uniform(low=[-0.6,-.6], high=[0,0.6], size=2)\n if np.linalg.norm(goal-target) > 0.1 and np.linalg.norm(goal-target) < 0.2 and np.linalg.norm(target) > 0.45 and np.linalg.norm(target) < 0.55:\n draw(target[0], target[1], [255,0,0], canvas2)\n j += 1\ncv2.circle(canvas2, (int(w/2 + w/2 * goal[1]), int(w/2 + w/2 * -goal[0])), 4, (0,255,0),8)\n\ncv2.imwrite(\"canvas_pix.png\", canvas2[c - w//10:c + c - w//10, c - w//4:c+w//4])\n","sub_path":"plotting/drawing_pix.py","file_name":"drawing_pix.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
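The `while` loop in drawing_pix.py is rejection sampling: it keeps only points whose distance to the origin lies in (0.45, 0.55) and whose distance to `goal` lies in (0.1, 0.2). Drawing candidates in batches and filtering with boolean masks does the same job far faster; a hypothetical vectorized version:

```python
import numpy as np

goal = np.array([-0.5, 0.0])
kept, need = [], 1_000_000
while need > 0:
    # draw a batch of candidates, then keep those inside both distance bands
    cand = np.random.uniform(low=[-0.6, -0.6], high=[0.0, 0.6], size=(100_000, 2))
    d_goal = np.linalg.norm(cand - goal, axis=1)
    d_orig = np.linalg.norm(cand, axis=1)
    ok = cand[(d_goal > 0.1) & (d_goal < 0.2) & (d_orig > 0.45) & (d_orig < 0.55)]
    kept.append(ok[:need])
    need -= len(kept[-1])
points = np.concatenate(kept)
```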
+{"seq_id":"642997572","text":"from enum import Enum, IntEnum\nfrom os import environ\n\nfrom flask_wtf import CSRFProtect\n\n\nclass ErrorCodes(IntEnum):\n    unknown_route = 0\n    unauthorized = 1\n    invalid_api_key = 2\n    incorrect_parameters = 3\n    bad_data_format = 4\n\n\nclass ValidationTypes(Enum):\n    json = \"json\"\n    none = \"none\"\n    params = \"params\"\n\n\nclass BotEventTypes(Enum):\n    mod_log = \"mod_log\"\n\n    send_message = \"send_message\"\n    send_embed = \"send_embed\"\n\n    add_role = \"add_role\"\n    remove_role = \"remove_role\"\n\n\nDEBUG_MODE = \"FLASK_DEBUG\" in environ\n\n# All snowflakes should be strings as RethinkDB rounds them as ints\nADMIN_BOTS_ROLE = \"270988689419665409\"\nADMINS_ROLE = \"267628507062992896\"\nANNOUNCEMENTS_ROLE = \"463658397560995840\"\nBOTS_ROLE = \"277546923144249364\"\nCODE_JAM_CHAMPIONS_ROLE = \"430492892331769857\"\nCONTRIBS_ROLE = \"295488872404484098\"\nDEVOPS_ROLE = \"409416496733880320\"\nDEVELOPERS_ROLE = \"352427296948486144\"\nHELPERS_ROLE = \"267630620367257601\"\nJAMMERS_ROLE = \"423054537079783434\"\nMODERATORS_ROLE = \"267629731250176001\"\nMUTED_ROLE = \"277914926603829249\"\nOWNERS_ROLE = \"267627879762755584\"\nPARTNERS_ROLE = \"323426753857191936\"\nPYTHON_ROLE = \"458226699344019457\"\nSTREAMERS_ROLE = \"462650825978806274\"\nSUBREDDIT_MOD_ROLE = \"458226413825294336\"\n\nALL_STAFF_ROLES = (OWNERS_ROLE, ADMINS_ROLE, MODERATORS_ROLE, DEVOPS_ROLE)\nTABLE_MANAGER_ROLES = (OWNERS_ROLE, ADMINS_ROLE, DEVOPS_ROLE)\nEDITOR_ROLES = ALL_STAFF_ROLES + (HELPERS_ROLE, CONTRIBS_ROLE)\n\nSERVER_ID = 267624335836053506\n\nDISCORD_API_ENDPOINT = \"https://discordapp.com/api\"\n\nDISCORD_OAUTH_REDIRECT = \"/auth/discord\"\nDISCORD_OAUTH_AUTHORIZED = \"/auth/discord/authorized\"\nDISCORD_OAUTH_ID = environ.get('DISCORD_OAUTH_ID', '')\nDISCORD_OAUTH_SECRET = environ.get('DISCORD_OAUTH_SECRET', '')\nDISCORD_OAUTH_SCOPE = 'identify'\nOAUTH_DATABASE = \"oauth_data\"\n\nGITLAB_ACCESS_TOKEN = environ.get(\"GITLAB_ACCESS_TOKEN\", '')\n\nPREFERRED_URL_SCHEME = environ.get(\"PREFERRED_URL_SCHEME\", \"http\")\n\nERROR_DESCRIPTIONS = {\n    # 5XX\n    500: \"The server encountered an unexpected error ._.\",\n    501: \"Woah! You seem to have found something we haven't even implemented yet!\",\n    502: \"This is weird, one of our upstream servers seems to have experienced an error.\",\n    503: \"Looks like one of our services is down for maintenance and couldn't respond to your request.\",\n    504: \"Looks like an upstream server experienced a timeout while we tried to talk to it!\",\n    505: \"You're using an old HTTP version. It might be time to upgrade your browser.\",\n    # 4XX\n    400: \"You sent us a request that we don't know what to do with.\",\n    401: \"Nope! You'll need to authenticate before we let you do that.\",\n    403: \"No way! You're not allowed to do that.\",\n    404: \"We looked, but we couldn't seem to find that page.\",\n    405: \"That's a real page, but you can't use that method.\",\n    408: \"We waited a really long time, but never got your request.\",\n    410: \"This used to be here, but it's gone now.\",\n    411: \"You forgot to tell us the length of the content.\",\n    413: \"No way! That payload is, like, way too big!\",\n    415: \"The thing you sent has the wrong format.\",\n    418: \"I'm a teapot, I can't make coffee. (._.)\",\n    429: \"Please don't send us that many requests.\"\n}\n\nJAM_STATES = [\n    \"planning\",\n    \"announced\",\n    \"preparing\",\n    \"running\",\n    \"judging\",\n    \"finished\"\n]\n\nJAM_QUESTION_TYPES = [\n    \"checkbox\",\n    \"email\",\n    \"number\",\n    \"radio\",\n    \"range\",\n    \"text\",\n    \"textarea\",\n    \"slider\"\n]\n\n# Server role colors\nROLE_COLORS = {\n    ADMIN_BOTS_ROLE: \"#6f9fed\",\n    ADMINS_ROLE: \"#e76e6c\",\n    BOTS_ROLE: \"#6f9fed\",\n    CODE_JAM_CHAMPIONS_ROLE: \"#b108b4\",\n    CONTRIBS_ROLE: \"#55cc6c\",\n    DEVOPS_ROLE: \"#a1d1ff\",\n    DEVELOPERS_ROLE: \"#fcfcfc\",\n    HELPERS_ROLE: \"#e0b000\",\n    JAMMERS_ROLE: \"#258639\",\n    MODERATORS_ROLE: \"#ce3c42\",\n    MUTED_ROLE: \"#fcfcfc\",\n    OWNERS_ROLE: \"#ffa3a1\",\n    PARTNERS_ROLE: \"#b66fed\",\n    PYTHON_ROLE: \"#6f9fed\",\n    STREAMERS_ROLE: \"#833cba\",\n    SUBREDDIT_MOD_ROLE: \"#d897ed\",\n}\n\n# CSRF\nCSRF = CSRFProtect()\n\n# Bot key\nBOT_API_KEY = environ.get(\"BOT_API_KEY\")\n\n# RabbitMQ settings\nBOT_EVENT_QUEUE = \"bot_events\"\n\nRMQ_USERNAME = environ.get(\"RABBITMQ_DEFAULT_USER\") or \"guest\"\nRMQ_PASSWORD = environ.get(\"RABBITMQ_DEFAULT_PASS\") or \"guest\"\nRMQ_HOST = \"localhost\" if DEBUG_MODE else environ.get(\"RABBITMQ_HOST\") or \"pdrmq\"\nRMQ_PORT = 5672\n\n# Channels\nCHANNEL_MOD_LOG = 282638479504965634\nCHANNEL_DEV_LOGS = 409308876241108992\nCHANNEL_JAM_LOGS = 452486310121439262\n","sub_path":"pysite/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
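Note that the two environment-variable fallback styles used in constants.py behave differently when a variable is set but empty: `environ.get(key, default)` keeps the empty string, while the `environ.get(key) or default` form replaces it. A short demonstration:

```python
from os import environ

environ["RABBITMQ_DEFAULT_USER"] = ""                    # set, but empty
print(environ.get("RABBITMQ_DEFAULT_USER", "guest"))     # '' -- default unused
print(environ.get("RABBITMQ_DEFAULT_USER") or "guest")   # 'guest'
```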
+{"seq_id":"646470290","text":"import labelparser.labelparser as lp\nfrom pm4py.objects.petri.importer import pnml as pnml_importer\nfrom knowledgebase.knowledgebase import KnowledgeBase\nfrom knowledgebase.knowledgerecord import Observation\nimport itertools\nimport os\nimport json\n\nclass BPMNPopulator:\n\n    def __init__(self, equal_bos=False, heuristic_parser=True):\n        self.parser = lp.load_default_parser()\n        self.equal_bos = equal_bos\n        self.heuristic_parser = heuristic_parser\n        self.follows = None\n        self.labels = None\n        self.tasks = set()\n        self.parsed_models = 0\n        self.parsed_models_used = 0\n\n    def populate(self, knowledge_base: KnowledgeBase, path_to_directory):\n\n        # Load list of \"parsable\" models\n        with open(\"input/knowledgebase/ai_file_list.txt\") as f:\n            parsable_files = f.readlines()\n        parsable_files = tuple([x.strip() for x in parsable_files])\n\n        self.parsed_models = 1\n\n        files = os.listdir(path_to_directory)\n        files = [f for f in files if (f.endswith(\"json\") and not f.endswith(\"meta.json\"))]\n        for i, file in enumerate(files):\n            print(f\"{i} - ({self.parsed_models}, {self.parsed_models_used}) --------------\")\n            print(path_to_directory + file)\n\n            # Only use models that can be parsed (the \"positive list\" was created separately)\n            if file.endswith(parsable_files):\n                self._populate_from_json(knowledge_base, os.path.abspath(path_to_directory) + \"/\" + file)\n            else:\n                print(\"MODEL SKIPPED\")\n        print(f\"Number of models parsed: {self.parsed_models}\")\n\n    # Checks whether considered json file is an English BPMN 2.0 model according to meta file\n    def _is_en_bpmn(self,path_to_directory,json_file):\n        json_file = json_file.replace(\".json\",\".meta.json\")\n        with open(os.path.abspath(path_to_directory) + \"/\" + json_file, 'r') as f:\n            data = f.read()\n            json_data = json.loads(data)\n        mod_language = json_data['model']['modelingLanguage']\n        nat_language = json_data['model']['naturalLanguage']\n        if mod_language==\"bpmn20\" and nat_language==\"en\":\n            return True\n        else:\n            return False\n\n    def _populate_from_json(self, knowledge_base: KnowledgeBase, path_to_json):\n        self.follows, self.labels, self.tasks = self._loadJSON(path_to_json)\n        print_info = False\n        extracted_info = False\n\n        # Clean strings (especially from SAP)\n        for l in self.labels.keys():\n            self.labels[l] = self.labels[l].replace('\\n', ' ').replace('\\r', '').replace('  ',' ')\n\n        # Find source and sink shapes (typically events)\n        source_shapes = set()\n        sink_shapes = set()\n        for s in self.follows.keys():\n\n            # Iterate over all shapes except sequence flows\n            irrelevant_shapes = (\"SequenceFlow\", \"DataObject\", \"Pool\", \"Lane\")\n            if not self.labels[s].startswith(irrelevant_shapes):\n                if len(self._get_postset(s)) == 0:\n                    sink_shapes.add(s)\n                if len(self._get_preset(s)) == 0:\n                    source_shapes.add(s)\n\n        # Print source and sink shapes\n        if print_info:\n            print()\n            print(\"Source and sink shapes:\")\n            print([self.labels[s] for s in source_shapes])\n            print([self.labels[s] for s in sink_shapes])\n\n        # Get all finite paths from start to end shapes\n        finite_paths = []\n        for s1 in source_shapes:\n            for s2 in sink_shapes:\n                if s1 != s2:\n                    finite_paths = [*finite_paths, *self._get_possible_paths(s1, s2, [])]\n\n        # Print all finite paths\n        if print_info:\n            print()\n            print(\"All finite paths:\")\n            for p in finite_paths:\n                print([self.labels[s] for s in p])\n\n\n        # Note that the computation below is still heuristic because of loops\n        # Test for co-occurrence and exclusiveness\n        for s1 in self.tasks:\n            for s2 in self.tasks:\n                # If s1 and s2 co-occur in ALL finite paths, we consider them as co-occurring\n                cooccurrence = True\n                # If s1 and s2 do NOT co-occur in ANY finite path, we consider them exclusive\n                exclusive = True\n                for p in finite_paths:\n                    if not (s1 in p) or not (s2 in p):\n                        cooccurrence = False\n                    if s1 in p and s2 in p:\n                        exclusive = False\n                label1 = self.labels[s1].lower()\n                label2 = self.labels[s2].lower()\n                if (cooccurrence or exclusive) and not self.heuristic_parser:\n                    s1_parse = self.parser.parse_label(label1)\n                    s2_parse = self.parser.parse_label(label2)\n                    # check BO requirements\n                    if not self.equal_bos or s1_parse.bos == s2_parse.bos:\n                        if len(s1_parse.actions) > 0 and len(s2_parse.actions) > 0:\n                            if s1_parse.actions[0] != s2_parse.actions[0]:\n                                if cooccurrence:\n                                    knowledge_base.add_observation(s1_parse.actions[0], s2_parse.actions[0],Observation.CO_OCC)\n                                    extracted_info = True\n                                    if print_info:\n                                        print(f\"CO-OCC:{self.labels[s1]} - {self.labels[s2]}, verbs: {s1_parse.actions[0]} - {s2_parse.actions[0]}\")\n                                if exclusive:\n                                    knowledge_base.add_observation(s1_parse.actions[0], s2_parse.actions[0],Observation.XOR)\n                                    extracted_info = True\n                                    if print_info:\n                                        print(f\"XOR: {self.labels[s1]} - {self.labels[s2]}, verbs: {s1_parse.actions[0]} - {s2_parse.actions[0]}\")\n                if (cooccurrence or exclusive) and self.heuristic_parser:\n                    if not self.equal_bos or lp.differ_by_one_word(label1, label2):\n                        (verb1, verb2) = lp.get_differences(label1, label2)\n                        if cooccurrence:\n                            knowledge_base.add_observation(verb1, verb2, Observation.CO_OCC)\n                            extracted_info = True\n                            if print_info:\n                                print(f\"CO-OCC:{self.labels[s1]} - {self.labels[s2]}, verbs: {verb1} - {verb2}\")\n                        if exclusive:\n                            knowledge_base.add_observation(verb1, verb2, Observation.XOR)\n                            extracted_info = True\n                            if print_info:\n                                print(f\"XOR: {self.labels[s1]} - {self.labels[s2]}, verbs: {verb1} - {verb2}\")\n\n        # Search for lifecycle relations\n        for s1 in self.tasks:\n            for s2 in self.tasks:\n                if s2 in self._get_transitive_postset(s1, set()) and not s1 in self._get_transitive_postset(s2,set()):\n                    label1 = self.labels[s1].lower()\n                    label2 = self.labels[s2].lower()\n                    if not self.heuristic_parser:\n                        s1_parse = self.parser.parse_label(label1)\n                        s2_parse = self.parser.parse_label(label2)\n                        # check BO requirements\n                        if not self.equal_bos or s1_parse.bos == s2_parse.bos:\n                            # There need to be different actions ...\n                            if len(s1_parse.actions) > 0 and len(s2_parse.actions) > 0:\n                                if s1_parse.actions[0] != s2_parse.actions[0]:\n                                    # and the same business object\n                                    # if t1_parse.bos == t2_parse.bos:\n                                    knowledge_base.add_observation(s1_parse.actions[0], s2_parse.actions[0],Observation.ORDER)\n                                    extracted_info = True\n                                    if print_info:\n                                        print(f\"ORDER: {self.labels[s1]} - {self.labels[s2]}, verbs: {s1_parse.actions[0]} - {s2_parse.actions[0]}\")\n                    if self.heuristic_parser and lp.differ_by_one_word(label1, label2):\n                        (verb1, verb2) = lp.get_differences(label1, label2)\n                        knowledge_base.add_observation(verb1, verb2, Observation.ORDER)\n                        extracted_info = True\n                        if print_info:\n                            print(f\"ORDER: {self.labels[s1]} - {self.labels[s2]}, verbs: {verb1} - {verb2}\")\n        self.parsed_models += 1\n        if extracted_info == True:\n            self.parsed_models_used += 1\n\n    def _get_possible_paths(self,s1,s2, path=[]):\n        # Returns all possible paths from s1 to s2 as a list of lists\n        postset = self._get_postset(s1)\n        # if target (s2) is in postset, add current and target shape and return\n        if s2 in postset:\n            path.append(s1)\n            path.append(s2)\n            return [path]\n        # if no shapes in postset, return empty list\n        if len(postset) == 0:\n            return []\n        # Several shapes in postset ...\n        else:\n            path.append(s1)\n\n            # Determine shapes to be visited (make sure that we don't visit the same shape again and get stuck)\n            to_be_visited = postset.difference(set(path))\n            # If no shape is left, return empty list\n            if len(to_be_visited) == 0:\n                return []\n            else:\n                paths = []\n                # Recursively traverse\n                for s in to_be_visited:\n                    recursive_paths = self._get_possible_paths(s, s2, path.copy())\n                    if len(recursive_paths) > 0:\n                        if isinstance(recursive_paths[0], list):\n                            for p in recursive_paths:\n                                paths.append(p)\n                        else:\n                            paths.append(recursive_paths)\n                return paths\n\n\n    def _get_transitive_postset(self,shape,visited_shapes):\n        # Returns all shapes in the postset of a shape. Note that these might\n        # include all shapes if the model contains loops.\n\n        # Obtain all shapes in the postset of considered shape\n        transitive_post_set = self._get_postset(shape)\n\n        # Determine which transitions still need to be visited\n        to_be_visited = transitive_post_set.difference(visited_shapes)\n\n        # Update visited shapes\n        visited_shapes.update(to_be_visited)\n        if len(to_be_visited) == 0:\n            return set()\n        else:\n            # Recursively build transitive postset\n            for s in to_be_visited:\n                recursive_result = self._get_transitive_postset(s, visited_shapes)\n                transitive_post_set.update(recursive_result)\n                visited_shapes.update(recursive_result)\n            return transitive_post_set\n\n\n    def _get_postset(self,shape):\n        # Note: The direct postset of a shape typically only contains the arc, not another element.\n        # Exceptions are attached events. Both are handled properly.\n        postset = set()\n        direct_postset = set(self.follows[shape])\n        for s in direct_postset:\n            # Ignore message flows\n            if self.labels[s].startswith(\"MessageFlow\"):\n                continue\n            if not self.labels[s].startswith(\"SequenceFlow\"):\n                postset.add(s)\n            else:\n                postset.update(self.follows[s])\n        return postset\n\n\n    def _get_preset(self,shape):\n        # Note: The direct preset of a shape typically only contains the arc, not another element.\n        # Exceptions are attached events. Both are handled properly.\n        preset = set()\n        for s1 in self.follows.keys():\n            if s1!=shape and shape in self.follows[s1]:\n                if not self.labels[s1].startswith(\"MessageFlow\"):\n                    if not self.labels[s1].startswith(\"SequenceFlow\"):\n                        preset.add(s1)\n                    else:\n                        for s2 in self.follows.keys():\n                            if s2!=s1 and s1 in self.follows[s2]:\n                                preset.add(s2)\n        return preset\n\n\n    def _process_shapes(self, shapes):\n\n        follows = {}\n        labels = {}\n        tasks = set()\n\n        # Analyze shape list and store all shapes and activities\n        # PLEASE NOTE: the code below ignores BPMN sub processes\n        for shape in shapes:\n\n            # Save all shapes to dict\n            #print(shape['stencil']['id'], shape)\n\n            # If current shape is a pool or a lane, we have to go a level deeper\n            if shape['stencil']['id'] == 'Pool' or shape['stencil']['id'] == 'Lane':\n                result = self._process_shapes(shape['childShapes'])\n                follows.update(result[0])\n                labels.update(result[1])\n                tasks.update(result[2])\n\n            shapeID = shape['resourceId']\n            outgoingShapes = [s['resourceId'] for s in shape['outgoing']]\n            if shapeID not in follows:\n                follows[shapeID] = outgoingShapes\n\n            # Save all tasks and respective labels separately\n            if shape['stencil']['id'] == 'Task':\n                if not shape['properties']['name'] == \"\":\n                    tasks.add(shape['resourceId'])\n                    labels[shape['resourceId']] = shape['properties']['name']\n                else:\n                    labels[shape['resourceId']] = 'Task'\n            else:\n                if 'name' in shape['properties'] and not shape['properties']['name'] == \"\":\n                    labels[shape['resourceId']] = shape['stencil']['id'] + \" (\" + shape['properties']['name'] + \")\"\n                else:\n                    labels[shape['resourceId']] = shape['stencil']['id']\n        return follows, labels, tasks\n\n    def _loadJSON(self, path_to_json):\n        json_data = None\n        with open(path_to_json, 'r') as f:\n            data = f.read()\n            json_data = json.loads(data)\n\n        follows, labels, tasks = self._process_shapes(json_data['childShapes'])\n        return follows, labels, tasks\n\n\n","sub_path":"code/knowledgebase_population/bpmnpopulator.py","file_name":"bpmnpopulator.py","file_ext":"py","file_size_in_byte":14440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
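The co-occurrence/exclusiveness rule in `_populate_from_json` is easiest to see on a toy model: an XOR split into two branches, flanked by shared tasks. A self-contained check with hypothetical task names:

```python
# Two finite paths through a model with an XOR split between
# "approve order" and "reject order".
finite_paths = [
    ["register order", "approve order", "archive order"],
    ["register order", "reject order", "archive order"],
]

def relation(s1, s2, paths):
    cooccurrence = all(s1 in p and s2 in p for p in paths)   # together in ALL paths
    exclusive = not any(s1 in p and s2 in p for p in paths)  # together in NO path
    return cooccurrence, exclusive

print(relation("register order", "archive order", finite_paths))  # (True, False)
print(relation("approve order", "reject order", finite_paths))    # (False, True)
print(relation("approve order", "archive order", finite_paths))   # (False, False)
```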
+{"seq_id":"483486988","text":"#!/usr/bin/env python\nimport logging\n\n# from termcolor import colored\nDEFAULT_LOG_FORMAT = ('%(levelname)s %(message)s')\nDEFAULT_TIME_STAMP = \"%m-%d %H:%M:%S\"\nBLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)\n\n# The background is set with 40 plus the number of the color, and the foreground with 30\n\n# These are the sequences need to get colored ouput\nRESET_SEQ = \"\\033[0m\"\nCOLOR_SEQ = \"\\033[1;%dm\"\nBOLD_SEQ = \"\\033[1m\"\n\n\ndef formatter_message(message, use_color=True):\n if use_color:\n message = message.replace(\n \"$RESET\", RESET_SEQ).replace(\"$BOLD\", BOLD_SEQ)\n else:\n message = message.replace(\"$RESET\", \"\").replace(\"$BOLD\", \"\")\n return message\n\n\nCOLORS = {\n 'WARNING': YELLOW,\n 'INFO': WHITE,\n 'DEBUG': BLUE,\n 'CRITICAL': YELLOW,\n 'ERROR': RED\n}\n\n\nclass ColoredFormatter(logging.Formatter):\n def __init__(self, msg, use_color=True):\n logging.Formatter.__init__(self, msg, DEFAULT_TIME_STAMP)\n self.use_color = use_color\n\n def format(self, record):\n levelname = record.levelname\n if self.use_color and levelname in COLORS:\n levelname_color = COLOR_SEQ % (\n 30 + COLORS[levelname]) + levelname + RESET_SEQ\n record.levelname = levelname_color\n return logging.Formatter.format(self, record)\n\n\nclass ColoredLogger(logging.Logger):\n FORMAT = DEFAULT_LOG_FORMAT\n COLOR_FORMAT = formatter_message(FORMAT, True)\n\n def __init__(self, name):\n logging.Logger.__init__(self, name, logging.DEBUG)\n\n color_formatter = ColoredFormatter(self.COLOR_FORMAT)\n\n console = logging.StreamHandler()\n console.setFormatter(color_formatter)\n\n self.addHandler(console)\n return\n\nlogging.setLoggerClass(ColoredLogger)\n\ndef get_logger(name, level=logging.INFO):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n return logger\n\nif __name__ == '__main__':\n log = get_logger('test')\n log2 = get_logger('test2')\n log.warn('sdf')\n log.info('ss')\n","sub_path":"simplelog.py","file_name":"simplelog.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
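Assuming the module above is importable as `simplelog`, typical usage looks like the snippet below; note that `Logger.warning()` is the current spelling of the deprecated `.warn()` used in the `__main__` demo.

```python
from simplelog import get_logger

log = get_logger("demo")            # INFO level by default
log.debug("hidden at INFO level")
log.info("white info message")
log.warning("yellow warning")
log.error("red error")
```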
+{"seq_id":"427776473","text":"import turtle\nimport random\ncolors=[\"blue\",\"red\",\"yellow\",\"magenta\",\"orange\",\"black\",\"green\",\"gray\",\"pink\",\"purple\",\"white\",\"GOLD\",]\nturtle.hideturtle()\nturtle.bgcolor(\"cyan\")\nSIZE_X= 800\nSIZE_Y= 500\nfood=turtle.clone()\nfood.penup()\nturtle.setup(SIZE_X+50, SIZE_Y+50)\ntriangle = turtle.clone()\n\ntriangle.penup()\ntriangle.goto(SIZE_X/2,-SIZE_Y/2)\ntriangle.pendown()\ntriangle.goto(SIZE_X/2,SIZE_Y/2)\ntriangle.goto(-SIZE_X/2,SIZE_Y/2)\ntriangle.goto(-SIZE_X/2,-SIZE_Y/2)\ntriangle.goto(SIZE_X/2,-SIZE_Y/2)\n\ntriangle.hideturtle()\nturtle.penup()\n\nUP_ARROW = 'Up'\nLEFT_ARROW = 'Left'\nDOWN_ARROW = 'Down'\nRIGHT_ARROW = 'Right'\nTIME_STEP = 100\nSTART_LENGTH=5\n\nSPACEBAR = 'space'\n\nUP = 0\nDOWN = 1\nLEFT = 2\nRIGHT = 3\n\n\nSQUARE_SIZE= 20\nr = SQUARE_SIZE/2\nfood.shape(\"circle\")\npos_list=[]\nstamp_list=[]\nfood_stamps=[]\nfood_pos=[]\nFood_size=[]\ncircle=turtle.clone()\ncircle.shape(\"circle\")\nc=0\nturtle.hideturtle()\nCIRCLE_SIZE = 50\nscore = 0\nfor i in range (START_LENGTH):\n    food_pos_x=food.pos()[0]\n    food_pos_y=food.pos()[1]\n    x_pos=circle.pos()[0]\n    y_pos=circle.pos()[1]\n    my_pos=(x_pos,y_pos)\n    circle.goto(x_pos,y_pos)\n    pos_list.append(my_pos)\n    cstamp=circle.stamp()  # call the method: stamp() returns a stamp id for clearstamp()\n    stamp_list.append(cstamp)\ndef make_food():\n    global x\n    color=random.choice(colors)\n    food.color(color)\n    min_x=-int(SIZE_X/2.5/SQUARE_SIZE)+1\n    max_x=int(SIZE_X/2.5/SQUARE_SIZE)-1\n    min_y=-int(SIZE_Y/2.5/SQUARE_SIZE)-1\n    max_y=int(SIZE_Y/2.5/SQUARE_SIZE)+1\n    food_x=random.randint(min_x,max_x)*SQUARE_SIZE\n    food_y=random.randint(min_y,max_y)*SQUARE_SIZE\n    food_size=random.randint(20,90)\n    food.goto(food_x,food_y)\n    food_pos.append(food.pos())\n    food.dot(food_size)\n    aliens=food.stamp()\n    food_stamps.append(aliens)\n    Food_size.append(food_size)\n\nfor i in range(20):\n    make_food()\n\ndirection=UP\nUP_EDGE=SIZE_Y/2\nDOWN_EDGE=-SIZE_Y/2\nLEFT_EDGE = -SIZE_X/2\nRIGHT_EDGE = SIZE_X/2\n\n# each key handler changes direction unless it would reverse straight back\ndef up():\n    global direction\n    if direction != DOWN:\n        direction = UP\n        print(\"you pressed up\")\ndef down():\n    global direction\n    if direction != UP:\n        direction = DOWN\n        print(\"you pressed down\")\ndef left():\n    global direction\n    if direction != RIGHT:\n        direction = LEFT\n        print(\"you pressed left\")\ndef right():\n    global direction\n    if direction != LEFT:\n        direction = RIGHT\n        print(\"you pressed right\")\n\ndef move_circle():\n    global score\n    my_pos = circle.pos()\n    x_pos = my_pos[0]\n    y_pos = my_pos[1]\n    new_pos = circle.pos()\n    new_x_pos = new_pos[0]\n    new_y_pos = new_pos[1]\n\n    for current_food in list(food_pos):  # iterate over a copy: we pop while looping\n        x_food = current_food[0]\n        y_food = current_food[1]\n\n        food_ind = food_pos.index((x_food, y_food))\n        distance = ((x_food - x_pos)**2 + (y_food - y_pos)**2)**0.5\n        check = Food_size[food_ind]/2 + CIRCLE_SIZE/2\n\n        if distance <= check:\n            food.clearstamp(food_stamps[food_ind])\n            food_pos.pop(food_ind)\n            food_stamps.pop(food_ind)\n            Food_size.pop(food_ind)  # keep sizes aligned with positions\n            circle.dot(CIRCLE_SIZE + 5)\n            print('you have eaten the food')\n\n            turtle.clear()\n            score = score +1\n            turtle.goto(-SIZE_X/2+5, SIZE_Y/2-12)\n            turtle.write('score = ' + str(score))\n\n    if new_y_pos >= UP_EDGE:\n        print(\"you hit the upper edge... game over\")\n        quit()\n    if new_y_pos <= DOWN_EDGE:\n        print(\"you hit the lower edge... game over\")\n        quit()\n    if new_x_pos <= LEFT_EDGE:\n        print(\"you hit the left edge... game over\")\n        quit()\n\n    if new_x_pos >= RIGHT_EDGE:\n        print(\"you hit the right edge... game over\")\n        quit()\n\n\n    if direction == RIGHT:\n        circle.goto(x_pos + CIRCLE_SIZE, y_pos)\n        print('you moved right!')\n    elif direction == LEFT:\n        circle.goto(x_pos - CIRCLE_SIZE, y_pos)\n        print('you moved left!')\n    elif direction == DOWN:\n        circle.goto(x_pos, y_pos - CIRCLE_SIZE)\n        print('you moved down!')\n    elif direction == UP:\n        circle.goto(x_pos, y_pos + CIRCLE_SIZE)\n        print('you moved up!')\n\n    my_pos = circle.pos()\n    pos_list.append(my_pos)\n    new_stamp = circle.stamp()\n    stamp_list.append(new_stamp)\n    old_stamp = stamp_list.pop(0)\n    circle.clearstamp(old_stamp)\n    pos_list.pop(0)\n\n    turtle.ontimer(move_circle,TIME_STEP)\n\nmove_circle()\nturtle.onkeypress(up, UP_ARROW)\nturtle.onkeypress(down, DOWN_ARROW)\nturtle.onkeypress(left, LEFT_ARROW)\nturtle.onkeypress(right, RIGHT_ARROW)\nturtle.listen()\n\n\n","sub_path":"food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
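The eating test in `move_circle` is a circle-vs-circle overlap check. Pulled out as a helper (hypothetical name), it is easy to unit-test without a turtle window:

```python
def circles_overlap(pos_a, r_a, pos_b, r_b):
    # two circles overlap when the distance between centres is at most
    # the sum of their radii
    dx, dy = pos_a[0] - pos_b[0], pos_a[1] - pos_b[1]
    return (dx * dx + dy * dy) ** 0.5 <= r_a + r_b

assert circles_overlap((0, 0), 25, (30, 0), 10)       # 30 <= 35: overlap
assert not circles_overlap((0, 0), 25, (60, 0), 10)   # 60 >  35: apart
```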
+{"seq_id":"510449437","text":"#!/usr/bin/env python\nimport os\nimport numpy as np\n\n\nwith open('/data/mridata/jdeng/tools/asymmetry/data/165_wmap_FC_HC_paths.txt', 'r') as fin:\n\thcs = [x.strip() for x in fin.readlines()]\n\n# 165 wmap FC controls by 246 Brainnetome nodes\nmean_ts_table = np.zeros((165,246))\n\nfor i,path in enumerate(hcs):\n\tts = np.loadtxt(os.path.join(path, 'processedfmri_TRCNnSFmDI/matrix/ts_mat_246.txt'))\n\tts_mean = ts.mean(axis=1)\n\tmean_ts_table[i,] = ts_mean\n\nnp.savetxt('/data/mridata/jdeng/tools/asymmetry/data/ts_mat_mean_165_246.txt', mean_ts_table)\n\n","sub_path":"scripts/make_mean_table.py","file_name":"make_mean_table.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"602238538","text":"\r\n'''\r\nmodule: _ode3.py\r\nauthor: Luis Paris\r\ndescription:\r\n- formulas from Chapra's numerical methods textbook\r\n- chapters on ordinary differential equations (ODEs)\r\n- implemented from algorithms/pseudocode provided\r\ncomments:\r\n- the following ODE algorithms implement ONE iteration to estimate the next value of y, given:\r\n * reference points x,y\r\n * df(x,y)/dx (or estimate) at reference points x,y\r\n'''\r\n\r\ndef RK4(dfxy, x, y, h):\r\n k1 = dfxy(x, y)\r\n ym = y + k1*h/2.\r\n k2 = dfxy(x + h/2., ym)\r\n ym = y + k2*h/2.\r\n k3 = dfxy(x + h/2., ym)\r\n ye = y + k3*h\r\n k4 = dfxy(x + h, ye)\r\n m = (k1 + 2*(k2 + k3) + k4)/6.\r\n return y + m*h\r\n#end RK4()\r\n","sub_path":"CISC601 - Scientific Computer II/PythonProject/_ode3.py","file_name":"_ode3.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
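A quick sanity check of the RK4 step above on dy/dx = y with y(0) = 1, whose exact solution is e^x; a hypothetical driver assuming the module is importable as `_ode3`:

```python
import math
from _ode3 import RK4

def dfxy(x, y):
    return y            # dy/dx = y

x, y, h = 0.0, 1.0, 0.1
for _ in range(10):     # integrate from x = 0 to x = 1 in ten steps
    y = RK4(dfxy, x, y, h)
    x += h
print(y, math.exp(1.0))  # ~2.7182797 vs 2.7182818 (global error ~2e-6)
```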
+{"seq_id":"449540813","text":"from matplotlib import pyplot as plt\n# from matplotlib import font_manager\nfrom matplotlib import rc\n\n# zh_font = font_manager.FontProperties(fname=\"/usr/share/fonts/truetype/win10-fonts/msyhl.ttc\")\nzh_font = {\"family\": \"Microsoft YaHei\"}\nrc(\"font\", **zh_font)\n\nx = range(20)\nx_labels = [\"%d岁\" % i for i in range(11, 31)]\n\na = [1, 0, 1, 1, 2, 4, 3, 2, 3, 4, 4, 5, 6, 5, 4, 3, 3, 1, 1, 1]\nb = [1, 0, 3, 1, 2, 2, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\n# print(\"x:\\n\", list(x))\n# print(\"x_labels:\\n\", x_labels)\n\nplt.plot(x, a, label=\"自己\")\nplt.plot(x, b, label=\"同桌\")\n\n# plt.xticks(x, x_labels, fontproperties=zh_font, rotation=45)\n# plt.legend(prop=zh_font, loc=\"center\")\nplt.xticks(x, x_labels, rotation=45)\nplt.legend(loc=\"upper left\")\n\nplt.show()\n\n","sub_path":"t2.py","file_name":"t2.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"481746185","text":"from robot.utils import ConnectionCache\nfrom AWSLibrary.base import LibraryComponent\nfrom AWSLibrary.base.robotlibcore import keyword\nimport boto3\n\n\nclass SessionKeywords(LibraryComponent):\n\n    def __init__(self, state):\n        LibraryComponent.__init__(self, state)\n        self._cache = ConnectionCache('No sessions.')\n\n    @keyword('Create Session With Keys')\n    def create_session_with_keys(self, region, access_key, secret_key):\n        \"\"\"Takes Region as an argument and creates a session with your access key\n        and secret key stored at ~/.aws/credentials.\n        Will throw error if not configured.\n\n        Examples:\n        | Create Session With Keys | us-west-1 | access key | secret key |\n        \"\"\"\n        self.rb_logger.info(\"Creating Session: %s\" % region)\n        session = boto3.Session(\n            aws_access_key_id=access_key,\n            aws_secret_access_key=secret_key,\n            region_name=region\n        )\n        print(session)\n        self._cache.register(session, alias=region)\n        self.state.session = session\n        return session\n\n    @keyword('Create Session With Profile')\n    def create_session_with_profile(self, region, profile):\n        \"\"\"Takes Region as an argument and creates a session with your profile\n        stored at ~/.aws/config. Will throw error if not configured\n\n        Examples:\n        | Create Session With Profile | us-west-1 | profile name |\n        \"\"\"\n        self.rb_logger.info(f\"Creating Session: {region}, {profile}\")\n        session = boto3.Session(\n            profile_name=profile,\n            region_name=region\n        )\n        self._cache.register(session, alias=region)\n        self.state.session = session\n        return session\n\n    @keyword('Delete Session')\n    def delete_session(self, region, profile=None):\n        \"\"\"Removes session.\n        Arguments:\n        - ``region``: A case and space insensitive string to identify the session.\n                (Default ``region``)\n        Examples:\n        | Delete Session | REGION |\n        \"\"\"\n        self._cache.switch(region)\n        index = self._cache.current_index\n        self._cache.current = self._cache._no_current\n        self._cache._connections[index - 1] = None\n        self._cache._aliases.pop(region)\n\n    @keyword('Delete All Sessions')\n    def delete_all_sessions(self):\n        \"\"\" Delete All Sessions \"\"\"\n        self._cache.empty_cache()\n","sub_path":"src/AWSLibrary/keywords/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"530215388","text":"# Copyright (C) 2021 FRANCINALDO CARVALHO \r\n#\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nimport pyautogui\r\nimport pyperclip\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import isdir, isfile, join\r\n\r\nfileFrame = \"D:\\\\scripts\\\\python\\\\processos_frame_pje.txt\"\r\nprocs = \"D:\\\\scripts\\\\python\\\\files.txt\"\r\n\r\ndef inserirNosCampos(processo):\r\n    pyperclip.copy(processo)\r\n    pyautogui.hotkey('ctrl','v')\r\n    #pyperclip.paste\r\n    pyautogui.press('tab')\r\n    pyautogui.PAUSE = 0.5\r\n\r\n    return 0\r\n\r\ndef insertOracleMalaDireta(listaMD):\r\n\r\n    pyautogui.moveTo(164,180,0.2)\r\n    pyautogui.click()\r\n    for processo in listaMD:\r\n        inserirNosCampos(processo)\r\n    return 0\r\n\r\ndef insertOracleMovimentacao(listaMov):\r\n    pyautogui.moveTo(164,180,0.2)\r\n    pyautogui.click()\r\n\r\n    for processo in listaMov:\r\n        inserirNosCampos(processo)\r\n    return 0\r\n\r\ndef insertOracleLocalizacao(listaLoc):\r\n    pyautogui.moveTo(164,180,0.2)\r\n    pyautogui.click()\r\n    for processo in listaLoc:\r\n        inserirNosCampos(processo)\r\n    return 0\r\n\r\n\r\ndef formatProc(processo):\r\n    proc = removeMaskLine(processo)\r\n    proc = addZerosLeft(proc)\r\n\r\n    return proc\r\n\r\ndef validProc(numProc): \r\n    d1 = numProc[7]\r\n    d2 = numProc[8]\r\n\r\n    digito = d1+d2\r\n\r\n    iDigito = int(digito)\r\n\r\n    #numProc = numProc[:7] + numProc[9:] + '00'\r\n    numProc = numProc[:7] + numProc[9:] + \"00\"\r\n\r\n    np = int(numProc)\r\n\r\n    modulo = np % 97\r\n\r\n    if (98-modulo == iDigito):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef verificaValidadeLista(listv):\r\n    listaValida = True\r\n    for processo in listv:\r\n        numProc = formatProc(processo)\r\n        if validProc(numProc):\r\n            print(processo + \" é válido\") \r\n        else:\r\n            print(processo + \" NÃO é válido\")\r\n            listaValida = False\r\n\r\n    if listaValida:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndef removeMaskLine(proc):\r\n\r\n    #blacklist = set('.-\\\"')\r\n    #proc.join(c for c in proc if c not in blacklist)\r\n    string = proc\r\n    string = string.replace('.', '')\r\n    string = string.replace('-', '')\r\n\r\n    return string\r\n\r\ndef addZerosLeft(proc):\r\n\r\n    if (len(proc) < 20):\r\n        qZerosLeft = (20 - len(proc))*\"0\"\r\n        proc = qZerosLeft + proc\r\n\r\n    return proc\r\n\r\ndef normalizeProcess(lst): #remove mask and fill with zeros ahead\r\n    listaOut = []\r\n    #listaOut.clear()\r\n    for l in lst:\r\n        proc_ = removeMaskLine(l)\r\n        proc_ = addZerosLeft(proc_)\r\n        listaOut.append(proc_)\r\n\r\n    return listaOut\r\n\r\n\r\ndef listaProcessos(listaLP):\r\n\r\n    for processo in listaLP:\r\n        print(processo) \r\n\r\n\r\ndef buscaProcessos():\r\n    comonTermInProc = \"4.01.400\"\r\n    processos = []\r\n    #blacklist = set('.-\\\"')\r\n    #proc.join(c for c in proc if c not in blacklist)\r\n    with open(fileFrame, 'r') as a_file:\r\n        list_of_lines = a_file.read().splitlines()\r\n        a_file.close()\r\n\r\n    for i in list_of_lines:\r\n        if comonTermInProc in i:\r\n            lastWord = i.split()\r\n            processos.append(lastWord[1])\r\n\r\n    return processos\r\n\r\n\r\ndef gravaListaArquivo(lista):\r\n    with open(procs, 'w') as a_file:\r\n        #a_file.writelines(lista)\r\n        for l in lista:\r\n            a_file.write(l+\"\\n\")\r\n        a_file.flush()\r\n        a_file.close()\r\n\r\n\r\ndef main():\r\n\r\n    listaTextFrame = buscaProcessos()\r\n    print(listaTextFrame)\r\n\r\n\r\n    print(\"insira a opção: \")\r\n    op = 0\r\n    while (op != 7):\r\n        print(\"1 - retorna os processos do frame\")\r\n        print(\"2 - grava em arquivo\")\r\n        print(\"3 - ...\")\r\n        print(\"4 - ...\")\r\n        print(\"5 - ...\")\r\n\r\n        print(\"7 - Sair\")\r\n        op = int(input(\"Escolha a opção: \"))\r\n\r\n        if op == 1:\r\n            listaProcessos(listaTextFrame)\r\n        elif op == 2:\r\n            gravaListaArquivo(listaTextFrame)\r\n            print(listaTextFrame)\r\n            print(\"arquivo salvo\")\r\n            pass\r\n\r\n\r\n        elif op == 7:\r\n            exit\r\n        else:\r\n            print(\"opção inválida\\n\" + \"op = \" + str(op))\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n\r\n","sub_path":"procNoTxt.py","file_name":"procNoTxt.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
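`validProc` above implements a mod-97 check-digit rule (Brazilian CNJ-style process numbers): the two digits at positions 7-8 must equal `98 - (rearranged % 97)`, where the rearranged number is the other 18 digits followed by `"00"`. That also tells you how to mint a number that passes the check; the skeleton below is entirely made up:

```python
def make_check_digits(skeleton):
    # skeleton: 20-digit string whose positions 7-8 are placeholders
    rearranged = int(skeleton[:7] + skeleton[9:] + "00")
    return 98 - (rearranged % 97)

skeleton = "00012340011234567890"            # hypothetical process number
dd = make_check_digits(skeleton)
proc = skeleton[:7] + "%02d" % dd + skeleton[9:]
# validProc(proc) now returns True by construction
```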
+{"seq_id":"64628427","text":"# mergesort, concise Python version 1, recommended\n# both Python versions of the merge rely on the built-in sort, so the strict TC is log n * n log n\n\nfrom typing import List\n\n\nclass Solution(object):\n    def reversePairs(self, nums: List[int]) -> int:\n        def mergeSort(low, high):\n            if low + 1 >= high:\n                return 0\n\n            mid = (low + high) // 2\n            cnt = mergeSort(low, mid) + mergeSort(mid, high)\n\n            j = mid\n            for i in nums[low: mid]:\n                while j < high and i > 2 * nums[j]:\n                    j += 1\n                cnt += j - mid\n\n            nums[low: high] = sorted(nums[low: high])\n            return cnt\n\n        return mergeSort(0, len(nums))\n\n\n\n\n\n","sub_path":"01-BasicAlgorithms/18-Sort/problems/493-reverse-pairs/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
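For comparison, counting during a hand-written merge avoids the `sorted()` call on every level and gives a strict O(n log n) bound; a self-contained sketch:

```python
def reverse_pairs(nums):
    def sort(lo, hi):
        if hi - lo <= 1:
            return 0
        mid = (lo + hi) // 2
        cnt = sort(lo, mid) + sort(mid, hi)
        j = mid
        for i in range(lo, mid):              # count pairs with nums[i] > 2*nums[j]
            while j < hi and nums[i] > 2 * nums[j]:
                j += 1
            cnt += j - mid
        nums[lo:hi] = merge(nums[lo:mid], nums[mid:hi])
        return cnt

    def merge(a, b):
        out, i, j = [], 0, 0
        while i < len(a) and j < len(b):
            if a[i] <= b[j]:
                out.append(a[i]); i += 1
            else:
                out.append(b[j]); j += 1
        return out + a[i:] + b[j:]

    return sort(0, len(nums))

assert reverse_pairs([1, 3, 2, 3, 1]) == 2
assert reverse_pairs([2, 4, 3, 5, 1]) == 3
```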
+{"seq_id":"531505236","text":"'''\nSolution\nMatrix exponentiation (divide and conquer); all entries are kept mod 1000\n'''\ndef productMatrix(arr1, arr2):\n    answer = []\n    for idx1 in range(len(arr1)):\n        row = []\n        for idx2 in range(len(arr2[0])):\n            tmp = 0\n            for idx3 in range(len(arr1[0])):\n                tmp += arr1[idx1][idx3] % 1000 * arr2[idx3][idx2] % 1000\n                tmp = tmp % 1000\n            row.append(tmp)\n        answer.append(row)\n    return answer\n\n\nN,B = map(int,input().split())\nmatrix = [list(map(lambda x: x % 1000, map(int,input().split()))) for i in range(N)]\n\ndef solve(tmp_b):\n    global matrix\n    if(tmp_b == 1):\n        return matrix\n    t = solve(int(tmp_b/2))\n    t = productMatrix(t, t)\n    if(tmp_b % 2 != 0):\n        t = productMatrix(t, matrix)\n    return t\n\nfor m in solve(B):\n    print(\" \".join(map(str, m)))\n\n\n\n","sub_path":"백준/Python/카테고리/분할정복/10830(행렬 제곱).py","file_name":"10830(행렬 제곱).py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
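`solve` above is ordinary fast exponentiation: square the half power, and multiply by the base once more when the exponent is odd, so only O(log B) matrix products are needed. The same recurrence on plain integers, checked against Python's built-in three-argument `pow`:

```python
def power_mod(a, b, mod=1000):
    if b == 1:
        return a % mod
    half = power_mod(a, b // 2, mod)
    result = half * half % mod
    if b % 2 != 0:
        result = result * a % mod
    return result

assert power_mod(3, 10) == pow(3, 10, 1000)   # 59049 % 1000 == 49
assert power_mod(7, 1) == 7
```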
+{"seq_id":"525898733","text":"from core.advbase import *\n\ndef module():\n    return Akasha\n\nclass Akasha(Adv):\n    conf = {}\n    conf['slots.a'] = [\n        'Study_Rabbits',\n        'Give_Me_Your_Wounded',\n        'Castle_Cheer_Corps',\n        'From_Whence_He_Comes',\n        'Bellathorna'\n    ]\n    conf['slots.d'] = 'Ariel'\n    conf['acl'] = \"\"\"\n        `dragon\n        `s3\n        `s4\n        `s2\n        `s1, not buff(s1)\n    \"\"\"\n    conf['coabs'] = ['Dagger2','Tobias','Blade']\n    conf['share'] = ['Summer_Luca', 'Patia']\n\n    def prerun(self):\n        self.team_sp = 0\n\n    def s2_charge_sp(self, t):\n        self.charge(t.name, 420)\n        self.team_sp += 420\n\n    def s2_proc(self, e):\n        charge_timer = Timer(self.s2_charge_sp, 1.5, True)\n        charge_timer.name = e.name\n        EffectBuff('sp_regen_zone', 10, lambda: charge_timer.on(), lambda: charge_timer.off()).no_bufftime().on()\n\n    def post_run(self, end):\n        # self.stats.append(f'team_sp:{self.team_sp}')\n        self.comment = f'total {self.team_sp} SP to team from s2'\n\n\nif __name__ == '__main__':\n    import sys\n\n    from core.simulate import test_with_argv\n    test_with_argv(None, *sys.argv)\n","sub_path":"adv/akasha.py","file_name":"akasha.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"512305573","text":"import requests, bs4, os, sys\n\n#keyword = ' '.join(sys.argv[1:])\nkeyword = input('Enter the keyword to search') \nurl = 'https://imgur.com/search?q=' + keyword\nos.makedirs('imgurImages', exist_ok=True) \nprint(url)\nx = 0\nwhile x < 10:\n res = requests.get(url)\n res.raise_for_status()\n soup = bs4.BeautifulSoup(res.text, 'html.parser')\n linkElem = soup.select('.image-list-link')\n if linkElem == []:\n print('Could not be found')\n x += 1\n else:\n imgUrl = 'https://imgur.com' + linkElem[x].get('href')\n print('Downloading %s '%imgUrl)\n res = requests.get(imgUrl)\n soup = bs4.BeautifulSoup(res.text, 'html.parser')\n imgSrcs = soup.select('div.Gallery-Content--mediaContainer:nth-child(4) > div:nth-child(1) > div:nth-child(1)')\n print(len(imgSrcs))\n \"\"\"\n for i in range(len(imgSrcs)):\n imageUrl = imgSrcs[i].get('src')\n print(imageUrl)\n print(os.path.basename(imageUrl))\n imageFile = open(os.path.join('imgurImages', os.path.basename(imageUrl)), 'wb')\n print('Downloading %s '% imageUrl)\n res = requests.get(imageUrl)\n res.raise_for_status()\n for chunk in res.iter_content(100000):\n imageFile.write(chunk)\n imageFile.close()\n \"\"\"\n x += 1\n \n","sub_path":"downloadingimgurimages.py","file_name":"downloadingimgurimages.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
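The commented-out block above is the usual chunked-download pattern with `requests`; as a standalone helper (hypothetical name, and note that imgur's markup changes often, so the CSS selectors above may need updating):

```python
import os
import requests

def download_image(image_url, dest_dir="imgurImages", chunk_size=100_000):
    os.makedirs(dest_dir, exist_ok=True)
    res = requests.get(image_url)
    res.raise_for_status()
    path = os.path.join(dest_dir, os.path.basename(image_url))
    with open(path, "wb") as image_file:
        for chunk in res.iter_content(chunk_size):   # stream to disk in chunks
            image_file.write(chunk)
    return path
```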
+{"seq_id":"368641367","text":"# -*- coding: utf-8 -*-\n\n# Project Euler 0080\n# It is well known that if the square root of a natural number is not an\n# integer, then it is irrational. The decimal expansion of such square roots is\n# infinite without any repeating pattern at all.\n# The square root of two is 1.41421356237309504880..., and the digital sum of\n# the first one hundred decimal digits is 475.\n# For the first one hundred natural numbers, find the total of the digital sums\n# of the first one hundred decimal digits for all the irrational square roots.\n# PNG:\n# damn, what a badly worded statement: it is not the first 100 decimal digits,\n# we compute the first 100 digits INCLUDING the integer digits... ¬¬ in short,\n# the problem explains it terribly\n# Find the square root of 2.\n#       1.  4  1  4  2\n#      /\n#    \\/  02.00 00 00 00\n#        02            1*1 <= 2 < 2*2              x = 1\n#        01            y = x*x = 1*1 = 1\n#        01 00         24*4 <= 100 < 25*5          x = 4\n#        00 96         y = (20+x)*x = 24*4 = 96\n#        04 00         281*1 <= 400 < 282*2        x = 1\n#        02 81         y = (280+x)*x = 281*1 = 281\n#        01 19 00      2824*4 <= 11900 < 2825*5    x = 4\n#        01 12 96      y = (2820+x)*x = 2824*4 = 11296\n#        06 04 00      28282*2 <= 60400 < 28283*3  x = 2\n# The desired precision is achieved:\n# The square root of 2 is about 1.4142\n\n\ndef raiz_x(p, c):\n    # x(20p+x) <= c\n    x = 0\n    while x*((20*p)+x) <= c:\n        x += 1\n\n    return x-1\n\n\ndef raiz_sum_dec(n, decimales):\n    # convert the number to a string, prepending a 0 if the number of digits is odd\n    if len(str(n)) % 2 == 0:\n        num = str(n)\n    else:\n        num = '0' + str(n)\n\n    c = 0\n    p = 0\n    numdec = 0\n    sumdec = 0\n    while numdec < decimales-1:\n\n        # the two digits to process\n        if len(num) > 0:\n            c = (c*100) + int(num[0:2])\n            num = num[2:]\n        else:\n            if c == 0 and numdec == 0:\n                return 0\n            c = c * 100\n            numdec += 1\n\n        x = raiz_x(p, c)\n        y = ((20*p)+x)*x\n        p = (p*10)+x\n\n        # the new c...\n        c = c - y\n\n        sumdec += x\n\n    return sumdec\n\n\ndef result():\n    sumatory = 0\n    for r in range(1, 101):\n        sumatory += raiz_sum_dec(r, 100)\n\n    # print(\"Resultado 0080:\", sumatory)\n    return sumatory\n","sub_path":"projecteuler/problems/d0075/p0080/r0080.py","file_name":"r0080.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
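Since Python 3.8, `math.isqrt` gives an independent cross-check of `raiz_sum_dec`: for 1 < n < 100, `isqrt(n * 10**198)` is exactly the first 100 significant digits of sqrt(n), which matches the "100 digits including the integer part" reading of the problem:

```python
from math import isqrt

def digit_sum_sqrt(n, digits=100):
    # floor(sqrt(n) * 10**(digits-1)) has exactly `digits` digits for 1 < n < 100
    root = isqrt(n * 10 ** (2 * (digits - 1)))
    return sum(int(c) for c in str(root))

print(digit_sum_sqrt(2))   # 475, the value quoted in the problem statement
```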
+{"seq_id":"510449437","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nfrom django.conf.urls import url\n\nfrom .views import (\n    MainPageView,\n    ProfileView,\n    ProfileEditView,\n    AvatartEditView,\n    ReferenceCreateView,\n    ReferenceListView,\n)\n\n\nurlpatterns = [\n    url(r'^$',\n        MainPageView.as_view(),\n        name='index'),\n    url(r'^profile-edit/$',\n        ProfileEditView.as_view(),\n        name='profile_edit'),\n    # the capture group name 'username' is assumed; the original name was lost\n    url(r'^profile/(?P<username>[0-9a-zA-Z_.-]{3,15})/$',\n        ProfileView.as_view(),\n        name='profile'),\n    url(r'^avatar-upload/$',\n        AvatartEditView.as_view(),\n        name='avatar_upload'),\n    url(r'^reference-create/$',\n        ReferenceCreateView.as_view(),\n        name='reference_create'),\n    url(r'^my-reviews/$',\n        ReferenceListView.as_view(),\n        name='my_reviews'),\n]\n","sub_path":"barter/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"146306087","text":"from itertools import product\n\nBORDER = -1\nEMPTY = 0\nME = 1\nOPPONENT = 2\n\n\ndef count_pattern(board_without_border, pattern):\n count = 0\n\n # add border to the board\n board_size = len(board_without_border) + 2\n board = list()\n board.append([BORDER] * board_size)\n for row in board_without_border:\n board.append([BORDER] + row.copy() + [BORDER])\n board.append([BORDER] * board_size)\n\n offsets = list(product((-1, 0, 1), (-1, 0, 1)))\n offsets.remove((0, 0))\n for i, j in product(range(board_size), repeat=2):\n if board[i][j] == pattern[0]:\n for offset in offsets:\n try:\n count += all(board[i + offset[0] * x][j + offset[1] * x] == p for x, p in enumerate(pattern))\n except IndexError:\n continue\n\n return count // 2 if pattern == pattern[::-1] else count\n","sub_path":"board_evaluation.py","file_name":"board_evaluation.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
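A small usage check, assuming the module above is importable as `board_evaluation`: on a 3x3 board with three of my stones in a row, the palindromic pattern `[ME, ME, ME]` is found once (the trailing `// 2` removes the mirrored duplicate):

```python
from board_evaluation import count_pattern, ME, OPPONENT, EMPTY

board = [
    [ME, ME, ME],
    [EMPTY, OPPONENT, EMPTY],
    [OPPONENT, EMPTY, OPPONENT],
]
print(count_pattern(board, [ME, ME, ME]))  # 1
```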
+{"seq_id":"346693686","text":"# -*- coding: utf-8 -*-\n\"\"\"\nHotstart example for a basic schism run with TEMP and SALT as tracers.\n\"\"\"\n\nimport schimpy.schism_hotstart as sh\nimport matplotlib.pyplot as plt\n\nyaml_fn = \"./hotstart.yaml\"\nmodules = ['TEM','SAL']\nhotstart_fn = \"hotstart.nc\" # output hotstart file\n\n# create a hotstart file for SCHISM\nh = sh.hotstart(yaml_fn,modules=modules,\n                crs ='EPSG:26910')\nh.create_hotstart()\nhnc = h.nc_dataset\nhnc.to_netcdf(hotstart_fn) \n\n#%% making a 2D surface plot\ncoll = h.mesh.plot_elems(hnc['tr_el'].values[:,0,0], clim=(14,18)) #clim=[0,35])\ncb = plt.colorbar(coll)\nplt.axis('off')\nplt.axis('equal')\nplt.title('Regional Temperature')\nplt.tight_layout(pad=1)\n\n#%% converting hotstart file to schism output format so that it can be viewed by VisIt\nsh.hotstart_to_outputnc(hotstart_fn,str(h.date),hgrid_fn='../../data_in/hgrid.gr3', \n                        vgrid_fn='../../data_in/vgrid.in.3d',vgrid_version=h.vgrid_version,\n                        outname=\"schout_hotstart.nc\")\n\n","sub_path":"examples/hotstart/examples/basic/create_hotstart.py","file_name":"create_hotstart.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"306609495","text":"# Fields for baracoda\nFIELD_COG_BARCODE = \"cog_barcode\"\n\n###\n# MongoDB field names\n###\n# samples collection\n# general fields\nFIELD_ROOT_SAMPLE_ID = \"Root Sample ID\"\nFIELD_RNA_ID = \"RNA ID\"\nFIELD_RESULT = \"Result\"\nFIELD_COORDINATE = \"coordinate\"\nFIELD_SOURCE = \"source\"\nFIELD_LAB_ID = \"Lab ID\"\nFIELD_PLATE_BARCODE = \"plate_barcode\"\nFIELD_DATE_TESTED = \"Date Tested\"\nFIELD_LH_SOURCE_PLATE_UUID = \"lh_source_plate_uuid\"\nFIELD_LH_SAMPLE_UUID = \"lh_sample_uuid\"\nFIELD_FILTERED_POSITIVE = \"filtered_positive\"\n\n# priority_samples collection\nFIELD_SAMPLE_ID = \"sample_id\"\nFIELD_MUST_SEQUENCE = \"must_sequence\"\nFIELD_PREFERENTIALLY_SEQUENCE = \"preferentially_sequence\"\nFIELD_PROCESSED = \"processed\"\n\n# source_plates collection\nFIELD_BARCODE = \"barcode\"\n\n###\n# DART specific column names:\n###\nFIELD_DART_DESTINATION_BARCODE = \"destination_barcode\"\nFIELD_DART_DESTINATION_COORDINATE = \"destination_coordinate\"\nFIELD_DART_SOURCE_BARCODE = \"source_barcode\"\nFIELD_DART_SOURCE_COORDINATE = \"source_coordinate\"\nFIELD_DART_CONTROL = \"control\"\nFIELD_DART_ROOT_SAMPLE_ID = \"root_sample_id\"\nFIELD_DART_RNA_ID = \"rna_id\"\nFIELD_DART_LAB_ID = \"lab_id\"\nFIELD_DART_RUN_ID = \"dart_run_id\"\n\n###\n# MLWH lighthouse_samples table field names\n###\nMLWH_LH_SAMPLE_ROOT_SAMPLE_ID = \"root_sample_id\"\nMLWH_LH_SAMPLE_COG_UK_ID = \"cog_uk_id\"\nMLWH_LH_SAMPLE_RNA_ID = \"rna_id\"\nMLWH_LH_SAMPLE_RESULT = \"result\"\n\n###\n# Sequencescape sample field names\n###\nFIELD_SS_SAMPLE_DESCRIPTION = \"sample_description\"\nFIELD_SS_NAME = \"name\"\nFIELD_SS_LAB_ID = \"lab_id\"\nFIELD_SS_RESULT = \"result\"\nFIELD_SS_SUPPLIER_NAME = \"supplier_name\"\nFIELD_SS_PHENOTYPE = \"phenotype\"\nFIELD_SS_CONTROL = \"control\"\nFIELD_SS_CONTROL_TYPE = \"control_type\"\nFIELD_SS_UUID = \"uuid\"\nFIELD_SS_COORDINATE = \"coordinate\"\nFIELD_SS_BARCODE = \"barcode\"\n","sub_path":"lighthouse/constants/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"455449284","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport pandas as pd\r\nimport plotly.graph_objs as go\r\nimport dash_table\r\nfrom tab1.view import import_tab1\r\nfrom tab2.view import import_tab2\r\nfrom tab3.view import import_tab3\r\nfrom dash.dependencies import Input, Output, State\r\nfrom sqlalchemy import create_engine\r\n\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\nengine = create_engine(\"mysql+mysqlconnector://root:theo123@localhost/tsa?host=127.0.0.1?port=3306\")\r\nconn = engine.connect()\r\nresult = conn.execute('select * from datatsa').fetchall()\r\ndfTSA = pd.DataFrame(result, columns = result[0].keys())\r\n\r\ndef all_gen():\r\n    a = [{'label' : i, 'value' : i} for i in dfTSA['Claim Site'].unique()]\r\n    a.append({'label' : 'All', 'value' : 'All'})\r\n    return a\r\n\r\napp.layout = html.Div(children = [\r\n    html.H1('UJIAN MODUL 2 DASHBOARD TSA'),\r\n    html.P('Created by: Theo'),\r\n    dcc.Tabs(value = 'tabs', id = 'tabs-1', children = [\r\n\r\n        dcc.Tab(label = 'DataFrame Table', id = 'tab-df', children = [\r\n            html.Div(children = [\r\n                html.P('Claim Site:'),\r\n                dcc.Dropdown(id = 'x-axis-4',\r\n                    options = all_gen(),\r\n                    value = 'All')\r\n            ],className = 'col-3'),\r\n\r\n            html.Div(children = [\r\n                html.P('Max Rows: '),\r\n                dcc.Input(\r\n                    id='x-axis-5',\r\n                    type='number',\r\n                    value = 10,\r\n                    placeholder='Input number'),\r\n            ],className = 'col-3'),\r\n\r\n            html.Br(),\r\n            html.Div(html.Button('Search'), id = 'search2', className = 'col-3'),\r\n\r\n            html.Br(),\r\n\r\n            html.Div([\r\n                dash_table.DataTable(\r\n                    id='table',\r\n                    columns=[{\"name\": i, \"id\": i} for i in dfTSA.columns],\r\n                    data=dfTSA.to_dict('records'),\r\n                    page_action = \"native\",\r\n                    page_current = 0,\r\n                    page_size = 10,\r\n                )\r\n            ])\r\n        ]),\r\n\r\n        #tab1\r\n        dcc.Tab(label = 'Bar-Chart', id = 'tab-satu', children = [\r\n            html.Div(children = [\r\n                html.Div(children = [\r\n                    html.P('Y1'),\r\n                    dcc.Dropdown(id = 'x-axis-1',\r\n                        options = [{'label' : i, 'value' : i} for i in dfTSA.select_dtypes('number').columns],\r\n                        value = 'Claim Amount')\r\n                ],className = 'col-3'),\r\n\r\n\r\n                html.Div(children = [\r\n                    html.P('Y2'),\r\n                    dcc.Dropdown(id = 'x-axis-2',\r\n                        options = [{'label' : i, 'value' : i} for i in dfTSA.select_dtypes('number').columns],\r\n                        value = 'Close Amount')\r\n                ],className = 'col-3'),\r\n\r\n                html.Div(children = [\r\n                    html.P('X'),\r\n                    dcc.Dropdown(id = 'x-axis-3',\r\n                        options = [{'label' : i, 'value' : i} for i in ['Claim Type', 'Claim Site', 'Disposition']],\r\n                        value = 'Claim Site')\r\n                ],className = 'col-3')\r\n            ],className = 'row'),\r\n\r\n            html.Div(html.Button('Search'), id = 'search', className = 'col-3'),\r\n            html.Div([\r\n                dcc.Graph(\r\n                    id = 'contoh-graph-bar',\r\n                    figure = {\r\n                        'data' : [\r\n                            {'x' : dfTSA['Claim Type'], 'y' : dfTSA['Claim Amount'], 'type' : 'bar', 'name' : 'Claim Amount'},\r\n                            {'x' : dfTSA['Claim Type'], 'y' : dfTSA['Close Amount'], 'type' : 'bar', 'name' : 'Close Amount'}\r\n                        ],\r\n                        'layout' : {'title' : 'Dashboard TSA Bar'}\r\n                    })])\r\n        ]),#penutup tab1\r\n\r\n        #tab 2\r\n        dcc.Tab(label = 'Pie-Chart', id = 'tab-dua', children = [\r\n            html.Div(\r\n                dcc.Dropdown(id = 'x-axis-6',\r\n                    options = [{'label' : i, 'value' : i} for i in dfTSA.select_dtypes('number').columns],\r\n                    value = 'Claim Amount')\r\n            ,className = 'col-3'),\r\n            html.Div([\r\n                dcc.Graph(\r\n                    id = 'contoh-graph-pie',\r\n                    figure = {\r\n                        'data' : [\r\n                            go.Pie(labels = list(dfTSA['Claim Type'].unique()),\r\n                                values = [dfTSA.groupby('Claim Type').mean()['Claim Amount'][i] for i in list(dfTSA['Claim Type'].unique())],\r\n                                sort = False)\r\n                        ],\r\n                        'layout' : {'title' : 'Mean Pie Chart'}\r\n                    })])\r\n        ]), #penutup tab2\r\n\r\n        #tab 3\r\n        dcc.Tab(label = 'Scatter-Chart', id = 'tab-tiga', children = [\r\n            html.Div(children = dcc.Graph (\r\n                id = 'graph-scatter',\r\n                figure = { 'data' : [\r\n                    go.Scatter(\r\n                        x = dfTSA['Claim Amount'],\r\n                        y = dfTSA['Close Amount'],\r\n                        mode = 'markers'\r\n                    )\r\n                ],\r\n                'layout' : go.Layout(\r\n                    xaxis = {'title' : 'Claim Amount'},\r\n                    yaxis = {'title' : 'Close Amount'},\r\n                    hovermode = 'closest'\r\n                )\r\n                }\r\n            ))\r\n        ])\r\n    ],\r\n    content_style = {\r\n        'fontFamily' : 'Arial',\r\n        'borderBottom' : '1px solid #d6d6d6',\r\n        'borderLeft' : '1px solid #d6d6d6',\r\n        'borderRight' : '1px solid #d6d6d6',\r\n        'padding' : '44px'\r\n    })\r\n],style={'maxWidth': '1200px', 'margin': '0 auto'})\r\n\r\n@app.callback(\r\n    [Output(component_id = 'table', component_property = 'data'),\r\n    Output(component_id = 'table', component_property = 'page_size')],\r\n    [Input(component_id = 'search2', component_property = 'n_clicks')],\r\n    [State(component_id = 'x-axis-4', component_property = 'value'),\r\n    State(component_id = 'x-axis-5', component_property = 'value')]\r\n)\r\n\r\ndef create_data_frame(n_clicks, x1, x2):\r\n    if x1 == 'All':\r\n        data = dfTSA.to_dict('records')\r\n    else:\r\n        data = dfTSA[dfTSA['Claim Site'] == x1].to_dict('records')\r\n    page_size = x2\r\n    return data, page_size\r\n\r\n@app.callback(\r\n    Output(component_id = 'contoh-graph-bar', component_property = 'figure'),\r\n    [Input(component_id = 'search', component_property = 'n_clicks')],\r\n    [State(component_id = 'x-axis-1', component_property = 'value'),\r\n    State(component_id = 'x-axis-2', component_property = 'value'),\r\n    State(component_id = 'x-axis-3', component_property = 'value')]\r\n)\r\n\r\ndef create_graph_bar(n_clicks,x1, x2, x3):\r\n    figure = {\r\n        'data' : [\r\n            {'x' : dfTSA[x3], 'y' : dfTSA[x1], 'type' : 'bar', 'name' : x1},\r\n            {'x' : dfTSA[x3], 'y' : dfTSA[x2], 'type' : 'bar', 'name' : x2}\r\n        ],\r\n        'layout' : {'title' : 'Dashboard TSA Bar'}\r\n    }\r\n    return figure\r\n\r\n@app.callback(\r\n    Output(component_id = 'contoh-graph-pie', component_property = 'figure'),\r\n    [Input(component_id = 'x-axis-6', component_property = 'value')]\r\n)\r\n\r\ndef create_graph_pie(x1):\r\n    figure = {\r\n        'data' : [\r\n            go.Pie(labels = list(dfTSA['Claim Type'].unique()),\r\n                values = [dfTSA.groupby('Claim Type').mean()[x1][i] for i in list(dfTSA['Claim Type'].unique())],\r\n                sort = False)\r\n        ],\r\n        'layout' : {'title' : 'Mean Pie Chart'}\r\n    }\r\n    return figure\r\n\r\nif __name__ == '__main__':\r\n    app.run_server(debug=True)","sub_path":"Script_Ujian_Modul_2_TheoJeremiah.py","file_name":"Script_Ujian_Modul_2_TheoJeremiah.py","file_ext":"py","file_size_in_byte":7485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"143558208","text":"# LeetCode 873. Length of Longest Fibonacci Subsequence\n# dp[j][k]: length of the Fibonacci-like subsequence ending with A[j], A[k]\nclass Solution:\n    def lenLongestFibSubseq(self, A):\n        dic = {a: i for i, a in enumerate(A)}\n        n = len(A)\n        dp = [[2] * n for _ in range(n)]\n        ans = 0\n        for k in range(2, n):\n            # scan j downwards: the difference A[k]-A[j] only grows, so we can break early\n            for j in range(k - 1, 0, -1):\n                diff = A[k] - A[j]\n                if diff >= A[j]:\n                    break\n                if diff in dic:\n                    i = dic[diff]\n                    dp[j][k] = dp[i][j] + 1\n                    ans = max(ans, dp[j][k])\n                else:\n                    continue\n\n        return ans\n    \n","sub_path":"python/873 Length of Longest Fibonacci Subsequence.py","file_name":"873 Length of Longest Fibonacci Subsequence.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"189582972","text":"from .crud import *\nfrom requests import post\nfrom app.routes.device.crud import get_device_by_id, get_devices\nfrom datetime import datetime\n\n\ndef get_relay_from_db(sensor_id=None, start=None, end=None, last=False):\n    try:\n        if sensor_id:\n            if last:\n                return get_relay_by_senor_id(sensor_id, last=True)\n            if start is None and end is None:\n                return get_relay_by_senor_id(sensor_id)\n            if start is None:\n                start = 0\n            if end is None:\n                end = int(datetime.timestamp(datetime.now()))\n            return get_relay_start_end_date(start, end, sensor_id)\n        else:\n            if last:\n                return get_all_relay(last=True)\n            if start is None and end is None:\n                return get_all_relay()\n            if start is None:\n                start = 0\n            if end is None:\n                end = int(datetime.timestamp(datetime.now()))\n            return get_relay_start_end_date(start, end)\n    except Exception:\n        raise  # re-raise the original exception, preserving its type and traceback\n\n\ndef action(req, sensor_id):\n    try:\n        dev_IP = get_device_by_id(req.device_id).ip\n        url = f\"http://{dev_IP}/relay/{sensor_id - 1}\"\n        response = post(url, json=req.__dict__)\n        if response.status_code == 200:\n            add_relay_status(req, sensor_id)\n        else:\n            raise ConnectionError\n        return {\"status\": \"ok\"}\n    except Exception:\n        raise  # re-raise the original exception, preserving its type and traceback\n\n\n","sub_path":"app/routes/relay/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"402577505","text":"import math\ncount_numbers = int(input())\nodd_sum = 0.0\nodd_min = math.inf\nodd_max = -math.inf\neven_sum = 0.0\neven_min = math.inf\neven_max = -math.inf\nfor i in range(1, count_numbers + 1):\n new_number = float(input())\n if i % 2 == 1:\n odd_sum += new_number\n odd_max = max(odd_max, new_number)\n odd_min = min(odd_min, new_number)\n else:\n even_sum += new_number\n even_max = max(even_max, new_number)\n even_min = min(even_min, new_number)\nif odd_sum.is_integer():\n print(f'OddSum={odd_sum:.0f},')\nelse:\n print(f'OddSum={odd_sum},')\nif odd_min == math.inf:\n print('OddMin=No')\nelif odd_min.is_integer():\n print(f'OddMin={odd_min:.0f},')\nelse:\n print(f'OddMin={odd_min},')\nif odd_max == -math.inf:\n print('OddMax=No')\nelif odd_max.is_integer():\n print(f'OddMax={odd_max:.0f},')\nelse:\n print(f'OddMax={odd_max},')\n\nif even_sum.is_integer():\n print(f'EvenSum={even_sum:.0f},')\nelse:\n print(f'EvenSum={even_sum},')\nif even_min == math.inf:\n print('EvenMin=No')\nelif even_min.is_integer():\n print(f'EvenMin={even_min:.0f},')\nelse:\n print(f'EvenMin={even_min},')\nif even_max == -math.inf:\n print('EvenMax=No')\nelif even_max.is_integer():\n print(f'EvenMax={even_max:.0f},')\nelse:\n print(f'EvenMax={even_max},')\n","sub_path":"Python-Basic/ex_lec_6_for_loop/03_odd_or_even_position.py","file_name":"03_odd_or_even_position.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
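The repeated `.is_integer()` and infinity branches in the script above can be folded into a single formatting helper (hypothetical name) that renders a float without a trailing `.0` and turns untouched extrema into `"No"`:

```python
import math

def fmt(value):
    if value in (math.inf, -math.inf):
        return "No"
    return f"{value:.0f}" if value.is_integer() else str(value)

print(f"OddSum={fmt(7.0)},")      # OddSum=7,
print(f"OddMin={fmt(math.inf)}")  # OddMin=No
print(f"OddMax={fmt(2.5)},")      # OddMax=2.5,
```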
+{"seq_id":"557708186","text":"from astronomaly.data_management import image_reader\nfrom astronomaly.preprocessing import image_preprocessing\nfrom astronomaly.feature_extraction import power_spectrum\nfrom astronomaly.dimensionality_reduction import decomposition\nfrom astronomaly.postprocessing import scaling\nfrom astronomaly.anomaly_detection import isolation_forest, human_loop_learning\nfrom astronomaly.clustering import tsne\nimport os\n\n\nimage_dir = '/home/michelle/BigData/Anomaly/GOODS_S/'\noutput_dir = '/home/michelle/BigData/Anomaly/astronomaly_output/images/'\n\n\ndef run_pipeline():\n \"\"\"\n An example of the full astronomaly pipeline run on image data\n\n Parameters\n ----------\n image_dir : str\n Directory where images are located (can be a single fits file or several)\n features : str, optional\n Which set of features to extract on the cutouts\n dim_reduct : str, optional\n Which dimensionality reduction algorithm to use (if any)\n anomaly_algo : str, optional\n Which anomaly detection algorithm to use\n\n Returns\n -------\n pipeline_dict : dictionary\n Dictionary containing all relevant data including cutouts, features and anomaly scores\n\n \"\"\"\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if len(image_dir) != 0:\n image_dataset = image_reader.ImageDataset(image_dir,\n transform_function=image_preprocessing.image_transform_log,\n window_size=128)\n\n pipeline_psd = power_spectrum.PSD_Features(force_rerun=False, output_dir=output_dir)\n features_original = pipeline_psd.run_on_dataset(image_dataset)\n\n pipeline_pca = decomposition.PCA_Decomposer(force_rerun=False, output_dir=output_dir,n_components=2)\n features = pipeline_pca.run(features_original)\n\n pipeline_scaler = scaling.FeatureScaler(force_rerun=False,output_dir=output_dir)\n features = pipeline_scaler.run(features)\n\n pipeline_iforest = isolation_forest.IforestAlgorithm(force_rerun=False,output_dir=output_dir)\n anomalies = pipeline_iforest.run(features)\n\n pipeline_score_converter = human_loop_learning.ScoreConverter(force_rerun=False,output_dir=output_dir)\n anomalies = pipeline_score_converter.run(anomalies)\n anomalies = anomalies.sort_values('score', ascending=False)\n\n pipeline_active_learning = human_loop_learning.NeighbourScore(alpha=1)\n\n pipeline_tsne = tsne.TSNE_Plot(force_rerun=False,output_dir=output_dir, perplexity=50)\n t_plot = pipeline_tsne.run(features)\n # t_plot = np.log(features_scaled + np.abs(features_scaled.min())+0.1)\n\n return {'dataset':image_dataset, 'features':features, 'anomaly_scores': anomalies,\n 'cluster':t_plot, 'active_learning':pipeline_active_learning}\n else:\n return None\n\n\n# run_pipeline(image_dir='/home/michelle/BigData/Anomaly/Meerkat_deep2/')\n\n\n","sub_path":"astronomaly/scripts/image_pipeline.py","file_name":"image_pipeline.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"613657289","text":"from rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom django.db.models import F, Value as V\nfrom django.db.models.functions import Concat\n\nfrom users.models import User\nfrom users.schema import WorkerListSchema\n\n\nclass WorkerList(APIView):\n schema = WorkerListSchema()\n\n def check_permissions(self, request):\n if not request.user.is_authenticated or not request.user.is_manager:\n self.permission_denied(request, 'Only managers has permission to perform this action')\n\n def get(self, request, *args, **kwargs):\n data = (\n User.objects\n .is_worker()\n .annotate(\n user_id=F('id'),\n full_name=Concat('first_name', V(' '), 'last_name'),\n ).values(\n 'user_id',\n 'full_name'\n )\n )\n return Response(data, status=status.HTTP_200_OK)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"182375170","text":"from xml.etree import cElementTree as ElementTree\nfrom xmlparser import xml_to_dict\nimport json\n\ndef main():\n texts = open('weather.xml','r')\n tree = ElementTree.parse(texts)\n root = tree.getroot()\n xmldict = xml_to_dict().parse(root)\n weather_json = open('weather.json','a')\n weather_json.write(json.dumps(xmldict))\n\nif __name__ == '__main__':\n main()","sub_path":"weather/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"608664072","text":"import os\nfrom kivy.utils import platform\nif platform == 'android':\n os.environ[\"IMAGEIO_FFMPEG_EXE\"] = \"/usr/bin/ffmpeg\"\nimport moviepy.editor as mpy\nimport glob, uuid, threading\nfrom PIL import Image\nfrom kivy.core.window import Window\nfrom proglog import ProgressBarLogger\nfrom math import floor\nfrom functools import partial\n\n#Globales\nfrom pytikzgenerate import globales\n\n#KIVY\nfrom kivy.lang import Builder\nfrom kivy.uix.relativelayout import RelativeLayout\nfrom kivy.graphics import Color,Rectangle,Ellipse,Line\n\n#FRAMEWORK KIVYMD\nfrom kivymd.uix.dialog import MDDialog\nfrom kivymd.uix.boxlayout import MDBoxLayout\nfrom kivymd.uix.button import MDRaisedButton\n\n#Librerias propias\nfrom pytikzgenerate.modulos.limpiar_recursos import limpiar_recursos\n\nclass InfoProgreso(MDBoxLayout):\n pass\n\n#PROGRESS BAR - CÓDIGO KIVY\nBuilder.load_string('''\n:\n orientation: \"vertical\"\n size_hint_y: None\n height: dp(100)\n MDLabel:\n id: porcentaje_de_realizacion\n font_name: \"media/fonts/OpenSans-ExtraBold\"\n font_style: \"H6\"\n halign: \"center\"\n size_hint: (1.0, .15)\n MDLabel:\n id: info\n font_name: \"media/fonts/OpenSans-ExtraBold\"\n font_style: \"Body1\"\n halign: \"center\"\n size_hint: (1.0, .75)\n MDProgressBar:\n id: porcentaje_actual\n size_hint: (1.0, .1)\n''')\n\n#PROGRESS BAR - API DEL MOVIEPY\nclass ProgresoCreacionImagen(ProgressBarLogger):\n # `window` is the class where all the gui widgets are held\n def __init__(self,clase_generar_archivo):\n super().__init__(init_state=None, bars=None, ignored_bars=None,\n logged_bars='all', min_time_interval=0, ignore_bars_under=0)\n self.clase_generar_archivo = clase_generar_archivo\n def callback(self, **changes):\n # Every time the logger is updated, this function is called with\n # the `changes` dictionnary of the form `parameter: new value`.\n # the `try` is to avoid KeyErrors before moviepy generates a `'t'` dict \n try:\n index = self.state['bars']['t']['index']\n total = self.state['bars']['t']['total']\n porcentaje_de_realizacion = index / total * 100\n if porcentaje_de_realizacion < 0:\n porcentaje_de_realizacion = 0\n if porcentaje_de_realizacion > 100:\n porcentaje_de_realizacion = 100\n self.clase_generar_archivo.actualizar_wid(porcentaje_de_realizacion, index=index, total=total,generar_dibujo_en_formato=True)\n except KeyError as e:\n print(\"ERROR\")\n print(e)\n\nclass GuardarDibujoEnImagen():\n def __init__(self,area_de_dibujar):\n # CONFIGURACIÓN DEL WID A DESCARGAR COMO IMG\n w,h = Window.size\n print(\"Window.size\")\n print(Window.size)\n self.wid_gif = RelativeLayout(size=(w,h))\n with self.wid_gif.canvas:\n Color(1,1,1,0)\n Rectangle(pos=self.wid_gif.pos,size=self.wid_gif.size)\n \n def figura_a_png(self,generar_dibujo_en_formato,name_figure,size,pos,color_relleno,tipo_de_linea,coords_borde,color_borde,line_width,angle_start=0,angle_end=0): \n #APLICAR RELLENO\n with self.wid_gif.canvas:\n Color(*color_relleno)\n if name_figure == \"rectangle\":\n with self.wid_gif.canvas:\n figura = Rectangle(pos=pos,size=size)\n elif name_figure == \"arc\":\n with self.wid_gif.canvas:\n figura = Ellipse(pos=pos,size=size,angle_start=angle_start,angle_end=angle_end)\n elif name_figure == \"circle\":\n with self.wid_gif.canvas:\n figura = Ellipse(pos=pos,size=size)\n #APLICAR BORDE\n if tipo_de_linea:\n with self.wid_gif.canvas:\n Color(*color_borde)\n if name_figure == \"rectangle\":\n with self.wid_gif.canvas:\n #BORDE RECTANGULO CON LINEAS DISCONTINUADAS\n 
Line(points=coords_borde, dash_offset=10, dash_length=5)\n elif name_figure == \"arc\":\n with self.wid_gif.canvas:\n Line(circle=coords_borde, dash_offset=10, dash_length=5)\n else:\n if name_figure == \"rectangle\":\n with self.wid_gif.canvas:\n #BORDE RECTANGULO\n Line(points=coords_borde,width=line_width)\n elif name_figure == \"arc\":\n with self.wid_gif.canvas:\n Line(circle=coords_borde,width=line_width)\n if generar_dibujo_en_formato:\n #ESTE PROCESO TMB TOMA TIEMPO PERO NO SE COMO REFLEJARLO EN UN PROGRESSBAR\n #1. Guardar imagenes con transparencia, con el proposito de que la imagen sea lo unico sin transparencia...\n id_figura = str(figura.uid)\n nombre_img = 'figura_estandar_'+id_figura+\".png\"\n ruta = os.path.join(globales.ruta_raiz,'recursos/crear_imagen/grafica_original/'+nombre_img)\n self.wid_gif.export_to_png(ruta)\n\n #2. Quitar transparencia de la imagen para solo conservar la figura\n image_png = Image.open(ruta)\n image_png.getbbox()\n image_png = image_png.crop(image_png.getbbox())\n \n #3. Convertir PNG a JPG para ser compatible como secuencia de imagenes de un .GIF (Si es requerido)\n image_png.load()\n background = Image.new(\"RGB\", image_png.size, (255, 255, 255))\n background.paste(image_png, mask=image_png.split()[3])\n nombre_img = 'figura_estandar_'+id_figura+\".jpg\"\n ruta = os.path.join(globales.ruta_raiz,'recursos/crear_imagen/grafica_recortada/'+nombre_img)\n background.save(ruta, 'JPEG', quality=80)\n\n def crear_imagen(self):\n #1. CONFIGURACIÓN - PROGRESS BAR\n self.old_value = 0#IMPORTANTE - ANTERIOR VALOR DEJADO POR EL ANTERIOR IMPULSO DE CARGA ILUSTRADO [PROGRESS BAR]\n\n #2. DESPLIEGUE DEL POP UP, CONEXIÓN AL API Logger del MoviePy Y CREAR UN GIF ANIMADO...\n self.contenido_progreso_wid = InfoProgreso()\n btn_salir = MDRaisedButton(text=\"Vale\",font_name=os.path.join(globales.ruta_raiz,\"media/fonts/OpenSans-SemiBold\"))\n self.md_dialog = MDDialog(\n title=\"Información del progreso de generación\",\n type=\"custom\",\n radius=[20, 7, 20, 7],\n content_cls=self.contenido_progreso_wid,\n buttons=[\n btn_salir\n ]\n )\n #Agregar los comportamientos correspondientes\n def cerrar_md_dialog(md_dialog,*args):\n md_dialog.dismiss()\n btn_salir.bind(on_release=partial(cerrar_md_dialog,self.md_dialog))\n self.md_dialog.open()\n #Genera GIF a partir de una lista de secuencia de imagenes\n threading.Thread(target=self.__crear_imagen).start()#En este caso la función por la cual el Progress Bar llenara hasta ser terminado es el \"self.onMul\"\n\n def __crear_imagen(self):\n #CONEXIÓN AL API Logger del MoviePy\n my_bar_logger = ProgresoCreacionImagen(self)\n #GENERAR ARCHIVO\n id = str(uuid.uuid4())\n #Ordenar los archivos de forma ascendente\n input_png_list = glob.glob(os.path.join(globales.ruta_raiz,\"recursos/crear_imagen/grafica_recortada/*.jpg\"))\n input_png_list.sort()\n clips = [mpy.ImageClip(i).set_duration(.1)\n for i in input_png_list]\n #¿Hay secuencia de imagenes o almenos una imagen?\n if (len(clips) > 0):\n concat_clip = mpy.concatenate_videoclips(clips, method=\"compose\")\n #No es una secuencia de imagenes - GENERAR JPG\n if len(clips) == 1:\n self.ruta_imagen_creado = os.path.join(globales.ruta_imagen,'Pytikz/imagen_generado_id-'+id+'.jpg').replace(\"/\",\"\\\\\")\n concat_clip.write_gif(self.ruta_imagen_creado,fps=2, logger=my_bar_logger)\n #Es una secuencia de imagenes - GENERAR GIF\n else:\n self.ruta_imagen_creado = os.path.join(globales.ruta_imagen,'Pytikz/imagen_generado_id-'+id+'.gif').replace(\"/\",\"\\\\\")\n 
concat_clip.write_gif(self.ruta_imagen_creado,fps=2, logger=my_bar_logger)\n \n #Si no lo hay es un error...\n else:\n self.md_dialog.title = \"¡ERROR!\"\n self.contenido_progreso_wid.ids.info.text = f\"Ocurrio un error al momento de crear la imagen\"\n \n #Limpiar recursos\n limpiar_recursos()\n\n def actualizar_wid(self, porcentaje_de_realizacion, info=\"\",index=0, total=0, generar_dibujo_en_formato=False,*args):\n porcentaje_actual = floor(porcentaje_de_realizacion)\n if porcentaje_actual != self.old_value and porcentaje_actual % 5 == 0:\n self.contenido_progreso_wid.ids.porcentaje_actual.value = porcentaje_actual#IMPORTANTE - [PROGRESS BAR]\n self.contenido_progreso_wid.ids.porcentaje_de_realizacion.text = \"PROGRESO: \"+str(porcentaje_actual)\n if generar_dibujo_en_formato:\n self.contenido_progreso_wid.ids.info.text = f\"{index} de {total} frames de la imagen completados... ({floor(porcentaje_de_realizacion)}%)\"\n else:\n self.contenido_progreso_wid.ids.info.text = info\n if(porcentaje_actual == 100):\n self.md_dialog.title = \"¡EXITO!\"\n self.contenido_progreso_wid.ids.porcentaje_de_realizacion.text = \"La imagen se creo satisfactoriamente\"\n self.contenido_progreso_wid.ids.info.text = \"Ruta: \"+self.ruta_imagen_creado","sub_path":"pytikzgenerate/modulos/submodulos/validador_pytikz/guardar_dibujo_en_formato.py","file_name":"guardar_dibujo_en_formato.py","file_ext":"py","file_size_in_byte":9599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"55887865","text":"#!/usr/bin/env python3\n\nimport bluetooth, threading\n\nfrom ev3dev2.led import Leds\nfrom ev3dev2.sound import Sound\nfrom ev3dev2._platform.ev3 import INPUT_1, INPUT_4\nfrom ev3dev2.motor import LargeMotor, MoveTank, OUTPUT_A, OUTPUT_D, SpeedPercent\nfrom ev3dev2.sensor.lego import TouchSensor, ColorSensor, UltrasonicSensor\n\nis_master = True\n# is_master = False\nserver_mac = '00:17:E9:B4:C7:4E'\n# slave_mac = 'CC:78:AB:4F:2E:4B'\n\nSPEED = 35\nTIME = 0.6\n\nRED = 5\nYELLOW = 4\nBLUE = 2\n\nred_detected = False\nyellow_detected = False\nblue_detected = False\n\ns = Sound()\nleds = Leds()\ncs = ColorSensor()\nus = UltrasonicSensor()\nus.mode = 'US-DIST-CM'\nts_left = TouchSensor(INPUT_1)\nts_right = TouchSensor(INPUT_4)\n\n\ndef speak(color):\n if color == RED:\n s.beep(440, 0.5)\n if color == YELLOW:\n s.beep(493, 0.5)\n if color == BLUE:\n s.beep(523, 0.5)\n \n\ndef connect(server_mac, is_master=True):\n port = 3\n if is_master:\n print(\"I AM MASTER!!!!!\")\n server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n server_sock.bind((server_mac, port))\n server_sock.listen(1)\n print('Listening...')\n client_sock, address = server_sock.accept()\n print('Accepted connection from ', address)\n return client_sock, client_sock.makefile('r'), client_sock.makefile('w')\n else:\n sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n print('Connecting...')\n sock.connect((server_mac, port)) \n print('Connected to ', server_mac)\n return sock, sock.makefile('r'), sock.makefile('w')\n \n\ndef disconnect(sock):\n sock.close()\n \n \ndef write_to_socket(sock_out, color):\n sock_out.write(str(color) + '\\n')\n sock_out.flush()\n\n\ndef run(server_mac, is_master=True):\n sock, sock_in, sock_out = connect(server_mac, is_master)\n listener = threading.Thread(target=listen, args=(sock_in, sock_out))\n listener.start()\n \n global red_detected\n global yellow_detected\n global blue_detected\n \n red_socket_thread = threading.Thread(target=write_to_socket, args=(sock_out, RED))\n red_speak_thread = threading.Thread(target=speak, args=(RED,))\n \n yellow_socket_thread = threading.Thread(target=write_to_socket, args=(sock_out, YELLOW))\n yellow_speak_thread = threading.Thread(target=speak, args=(YELLOW,))\n \n blue_socket_thread = threading.Thread(target=write_to_socket, args=(sock_out, BLUE))\n blue_speak_thread = threading.Thread(target=speak, args=(BLUE,))\n \n while not colors_found():\n if detect_line() or detect_collision() or detect_proximity():\n collision_protocol()\n leds.set_color(\"LEFT\", \"GREEN\")\n leds.set_color(\"RIGHT\", \"GREEN\")\n \n if detect_color() == RED and not red_detected:\n red_socket_thread.start()\n red_speak_thread.start()\n red_detected = True\n \n if detect_color() == YELLOW and not yellow_detected:\n yellow_socket_thread.start()\n yellow_speak_thread.start()\n yellow_detected = True\n \n if detect_color() == BLUE and not blue_detected:\n blue_socket_thread.start()\n blue_speak_thread.start()\n blue_detected = True\n \n move_both(SPEED)\n \n MoveTank(OUTPUT_A, OUTPUT_D).off()\n# move_both(0)\n s.speak(\"Task finished\")\n \n disconnect(sock_in)\n disconnect(sock_out)\n disconnect(sock)\n\n\ndef listen(sock_in, sock_out):\n print('Now listening...')\n \n global red_detected\n global yellow_detected\n global blue_detected\n \n red_speak_thread = threading.Thread(target=speak, args=(RED,))\n yellow_speak_thread = threading.Thread(target=speak, args=(YELLOW,))\n blue_speak_thread = threading.Thread(target=speak, args=(BLUE,))\n \n while not 
colors_found():\n data = int(sock_in.readline())\n if(data == RED):\n red_speak_thread.start()\n red_detected = True\n if(data == YELLOW):\n yellow_speak_thread.start()\n yellow_detected = True\n if(data == BLUE):\n blue_speak_thread.start()\n blue_detected = True\n \n\ndef move_both_for_seconds(percent, seconds):\n MoveTank(OUTPUT_A, OUTPUT_D).on_for_seconds(SpeedPercent(percent),\n SpeedPercent(percent),\n seconds,\n brake=False,\n block=True)\n\n\ndef move_both(percent):\n MoveTank(OUTPUT_A, OUTPUT_D).on(SpeedPercent(percent),\n SpeedPercent(percent))\n\n\ndef turn_left(percent, seconds):\n LargeMotor(OUTPUT_D).on_for_seconds(SpeedPercent(percent),\n seconds, brake=False,\n block=True)\n\n\ndef turn_right(percent, seconds):\n LargeMotor(OUTPUT_A).on_for_seconds(SpeedPercent(percent),\n seconds, brake=False,\n block=True)\n\n\ndef detect_line():\n return cs.color == 1\n\n\ndef detect_collision():\n return ts_left.is_pressed or ts_right.is_pressed\n\n\ndef detect_proximity():\n return us.value() / 10 < 25\n\n\ndef detect_color():\n if cs.color == RED:\n return RED\n if cs.color == YELLOW:\n return YELLOW\n if cs.color == BLUE:\n return BLUE\n\n\ndef collision_protocol():\n leds.set_color(\"LEFT\", \"RED\") \n leds.set_color(\"RIGHT\", \"RED\")\n move_both_for_seconds(-SPEED, TIME)\n turn_left(-SPEED, TIME)\n \n\ndef colors_found():\n return red_detected and yellow_detected and blue_detected\n\n\nrun(server_mac, is_master)\n","sub_path":"rover.py","file_name":"rover.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"384151613","text":"# -*- coding: utf-8 -*-\nimport tkinter as tk\nimport numpy as np\nimport math\n\nfrom copy import deepcopy\nfrom PIL import Image, ImageTk\nfrom tkinter import filedialog\n\nfrom ImgModel import ImgModel\n\nclass GUI(tk.Frame):\n #model\n imgModel = None\n\n inputImagePath = \"\"\n\n imageWidth = 480\n imageHeight = 270\n \n def __init__(self, master, imgModel):\n super().__init__(master, width=1000, height=1000)\n self.imgModel = imgModel\n self.pack()\n\n #选择图片路径\n self.selectInputImagePathButtonText = tk.StringVar()\n self.selectInputImagePathButtonText.set(\"请选择图片路径\")\n self.selectInputImagePathButton = tk.Button(self, textvariable=self.selectInputImagePathButtonText, command=self.__selectIamge)\n self.selectInputImagePathButton.grid(row = 0, column = 0, padx = 10, pady = 10)\n\n self.selectInputImagePathEntryText = tk.StringVar()\n self.selectInputImagePathEntryText.set(\"\")\n self.selectInputImagePathEntry = tk.Entry(self, textvariable=self.selectInputImagePathEntryText, width = 30)\n self.selectInputImagePathEntry.grid(row = 0, column = 1, columnspan = 2, padx = 10, pady = 10)\n\n #处理函数选择\n self.functionButtonList = []\n self.functionButtonNameList = [\"获取结果\"]\n self.functionButtonCommandList = [self.__getResult]\n for i in range(len(self.functionButtonNameList)):\n self.functionButtonList.append(tk.Button(self, text=self.functionButtonNameList[i], command=self.functionButtonCommandList[i]).grid(row = 2, column = 2 * i + 1, sticky=\"W\"))\n \n #参数输入框\n self.argsNameList = []\n self.argsValue = {\n }\n self.argsLabelList = []\n self.argsEntryList = []\n for i in range(len(self.argsNameList)):\n temp = tk.StringVar()\n temp.set(self.argsNameList[i])\n self.argsLabelList.append(tk.Label(self, textvariable = temp).grid(row = 1 , column = 2 * i, padx = 2, sticky=\"W\"))\n temp = tk.StringVar()\n temp.set(self.argsValue[self.argsNameList[i]])\n self.argsValue[self.argsNameList[i]] = temp\n self.argsEntryList.append(tk.Entry(self, textvariable = temp, width = 3).grid(row = 1, column = 2 * i + 1, sticky=\"W\"))\n\n #显示图片\n self.selectInputImagePathLabelText = tk.StringVar()\n self.selectInputImagePathLabelText.set(\"输入图片\")\n self.selectInputImagePathLabel = tk.Label(self, textvariable=self.selectInputImagePathLabelText)\n self.selectInputImagePathLabel.grid(row = 5, column = 0)\n\n self.pilInputImage = Image.fromarray(np.zeros((self.imageHeight, self.imageWidth))).convert(\"L\")\n self.tkInputImage = ImageTk.PhotoImage(image=self.pilInputImage)\n self.inputImageLabel = tk.Label(self, image=self.tkInputImage)\n self.inputImageLabel.grid(row = 6, column = 0, columnspan = 6)\n\n self.selectOutputImagePathLabelText = tk.StringVar()\n self.selectOutputImagePathLabelText.set(\"处理后图片\")\n self.selectOutputImagePathLabel = tk.Label(self, textvariable=self.selectOutputImagePathLabelText)\n self.selectOutputImagePathLabel.grid(row = 5, column = 6)\n\n self.pilOutputImage = Image.fromarray(np.zeros((self.imageHeight, self.imageWidth))).convert(\"L\")\n self.tkOutputImage = ImageTk.PhotoImage(image=self.pilOutputImage)\n self.outputImageLabel = tk.Label(self, image=self.tkOutputImage)\n self.outputImageLabel.grid(row = 6, column = 6, columnspan = 6)\n\n \n\n #事件函数\n def __selectIamge(self):\n filePath = filedialog.askopenfilename()\n if(filePath != \"\"):\n self.inputImagePath = filePath\n self.selectInputImagePathEntryText.set(filePath)\n\n self.pilInputImage = Image.open(self.inputImagePath).convert(\"L\")\n self.pilInputImage = self.__resize(self.pilInputImage, 
self.imageWidth, self.imageHeight)\n self.imgModel.setPilImage(self.pilInputImage)\n self.tkInputImage = ImageTk.PhotoImage(image=self.pilInputImage)\n self.inputImageLabel = tk.Label(self, image=self.tkInputImage)\n self.inputImageLabel.grid(row = 6, column = 0, columnspan = 6)\n \n\n self.pilOutputImage = Image.fromarray(np.zeros((self.imageHeight, self.imageWidth))).convert(\"L\")\n self.tkOutputImage = ImageTk.PhotoImage(image=self.pilOutputImage)\n self.outputImageLabel = tk.Label(self, image=self.tkOutputImage)\n self.outputImageLabel.grid(row = 6, column = 6, columnspan = 6)\n else:\n self.pilOutputImage = Image.fromarray(np.zeros((self.imageHeight, self.imageWidth))).convert(\"L\")\n self.tkOutputImage = ImageTk.PhotoImage(image=self.pilOutputImage)\n self.outputImageLabel = tk.Label(self, image=self.tkOutputImage)\n self.outputImageLabel.grid(row = 6, column = 6, columnspan = 6)\n \n return filePath\n\n def __getResult(self):\n self.imgModel.identify()\n self.tkOutputImage = ImageTk.PhotoImage(image=imgModel.getPilImage())\n self.outputImageLabel = tk.Label(self, image=self.tkOutputImage)\n self.outputImageLabel.grid(row = 6, column = 6, columnspan = 6)\n return \n\n def __resize(self, pilImage, width, height):\n widthRate = pilImage.size[0] / width\n heightRate = pilImage.size[1] / height\n rate = max(widthRate, heightRate)\n pilImage = pilImage.resize((int(pilImage.size[0] / rate), int(pilImage.size[1] / rate)))\n\n return pilImage\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n root.title(\"test\")\n imgModel = ImgModel()\n app = GUI(root, imgModel)\n app.mainloop()\n\n\n\n","sub_path":"backend/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"207806888","text":"from explicit_wait import MyWaits\nfrom selenium.webdriver.common.alert import Alert\nimport time\n\n\nurl = \"http://localhost//admin/\"\n\n\ndef test_login(browser):\n \"\"\" Check login \"\"\"\n my_waits = MyWaits(browser)\n browser.get(url)\n # login\n my_waits.element(\"#input-username\").send_keys(\"user\")\n # password\n my_waits.element(\"#input-password\").send_keys(\"bitnami1\")\n # button of login\n btn_login = my_waits.element(\".btn.btn-primary\")\n btn_login.click()\n # Find user account on page\n my_waits.element(\"#user-profile\", timeout=15) # big timeout special for firefox\n\n\ndef test_unlogin(browser):\n \"\"\" Check unlogin \"\"\"\n my_waits = MyWaits(browser)\n # Login\n test_login(browser)\n # Logout\n my_waits.element(\".fa.fa-sign-out\").click()\n # Find login on page\n my_waits.element(\"#input-username\")\n\n\ndef test_products_table(browser):\n \"\"\" Check transfer to Catalog/Products and check table \"\"\"\n my_waits = MyWaits(browser)\n # Login\n test_login(browser)\n # Press Catalog\n my_waits.element(\".fa.fa-tags.fw\").click()\n # Press Products\n catalog = browser.find_element_by_id(\"collapse1\")\n catalog_table = catalog.find_elements_by_tag_name(\"li\")\n for el in catalog_table:\n if el.text == \"Products\":\n btn_products = el\n btn_products.click()\n break\n # Check table is\n my_waits.element(\"#form-product\")\n\n\ndef test_products_add_product(browser):\n \"\"\" Check add product to products table \"\"\"\n my_waits = MyWaits(browser)\n # Go to page products table (General page)\n test_products_table(browser)\n # Press Add Product Button\n browser.find_element_by_css_selector(\".btn.btn-primary\").click()\n # Enter Product Name\n my_waits.element(\"#input-name1\").send_keys(\"ex_name_1\")\n # Enter Product Tag Name\n browser.find_element_by_css_selector(\"#input-meta-title1\").send_keys(\"ex_tag_1\")\n # Go to Data page\n table_title = browser.find_element_by_css_selector(\"#form-product\")\n table_title_list = table_title.find_elements_by_tag_name(\"li\")\n for el in table_title_list:\n if el.text == \"Data\":\n btn_data = el\n btn_data.click()\n break\n # Enter Product Name\n my_waits.element(\"#input-model\").send_keys(\"ex_model_1\")\n # Press Save Button\n browser.find_elements_by_css_selector(\".pull-right\")[3].find_element_by_tag_name(\"button\").click()\n # Check success after add product\n my_waits.element(\".alert.alert-success.alert-dismissible\")\n assert \\\n browser.find_element_by_css_selector(\".alert.alert-success.alert-dismissible\").get_property(\"innerText\")\\\n == \\\n \" Success: You have modified products!\\n×\"\n\n\ndef test_products_modify_product(browser):\n \"\"\" Check modify product in products table \"\"\"\n my_waits = MyWaits(browser)\n test_products_add_product(browser)\n # Find line with product\n lines = browser.find_element_by_css_selector(\".table.table-bordered.table-hover\").find_elements_by_tag_name(\"tr\")\n flag_out_of_loop = False\n for line in lines:\n elements = line.get_property(\"cells\")\n for el in elements:\n if el.get_property(\"innerText\") == \"ex_name_1\":\n # Press Edit Button\n line.find_element_by_css_selector(\".fa.fa-pencil\").click()\n flag_out_of_loop = True\n break\n if flag_out_of_loop:\n break\n # Clear Product Name\n my_waits.element(\"#input-name1\").clear()\n # Enter new Product Name\n my_waits.element(\"#input-name1\").send_keys(\"1_new_ex_name_1\")\n # Press Save Button\n 
browser.find_elements_by_css_selector(\".pull-right\")[3].find_element_by_tag_name(\"button\").click()\n # Check success after add product\n my_waits.element(\".alert.alert-success.alert-dismissible\")\n assert \\\n browser.find_element_by_css_selector(\".alert.alert-success.alert-dismissible\").get_property(\"innerText\") \\\n == \\\n \" Success: You have modified products!\\n×\"\n\n\ndef test_products_remove_product(browser):\n \"\"\" Check remove product from products table \"\"\"\n my_waits = MyWaits(browser)\n test_products_add_product(browser)\n # Find line with product\n lines = browser.find_element_by_css_selector(\".table.table-bordered.table-hover\").find_elements_by_tag_name(\"tr\")\n for line in lines:\n if \"ex_name_1\" in line.get_property(\"innerText\"):\n # Select product for remove\n line.find_elements_by_tag_name(\"td\")[0].click()\n flag_out_of_loop = True\n break\n # Press Trash Button\n browser.find_element_by_css_selector(\".fa.fa-trash-o\").click()\n # Confirm alert message\n Alert(browser).accept()\n # Check success after remove product\n my_waits.element(\".alert.alert-success.alert-dismissible\")\n assert \\\n browser.find_element_by_css_selector(\".alert.alert-success.alert-dismissible\").get_property(\"innerText\") \\\n == \\\n \" Success: You have modified products!\\n×\"\n\n\ndef test_element_by_id(browser):\n my_waits = MyWaits(browser)\n browser.get(url)\n my_waits.element(\"#footer\")\n # browser.find_element(By.ID, \"footer\")\n my_waits.element(\"#footer\")\n my_waits.element(\"#header-logo\")\n # browser.find_element_by_id(\"header\").find_element_by_id(\"header-logo\")\n\n\ndef test_element_by_name(browser):\n my_waits = MyWaits(browser)\n browser.get(url)\n my_waits.element(\"[name=username]\")\n # browser.find_element_by_name(\"username\")\n\n\ndef test_elements_by_css_selector(browser):\n my_waits = MyWaits(browser)\n browser.get(url)\n my_waits.elements(\".input-group-addon\")\n # browser.find_elements(By.CSS_SELECTOR, \".input-group-addon\")\n\n\ndef test_element_by_class_name_selector(browser):\n my_waits = MyWaits(browser)\n browser.get(url)\n my_waits.elements(\".fa.fa-user\")\n # browser.find_elements_by_class_name(\"fa fa-user\")\n","sub_path":"PythonQAOtus_Lesson11/5_find_elements_login_admin/test_selenium_5.py","file_name":"test_selenium_5.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"624086631","text":"import datetime\nimport logging\nimport json\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\n\nfrom djcelery import models as djcelery_models\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Host(models.Model):\n name = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.name\n\n\nclass Job(models.Model):\n periodic_task = models.OneToOneField(\n djcelery_models.PeriodicTask, related_name='+', null=True, blank=True)\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n\n script = models.TextField()\n deleted = models.BooleanField(default=False)\n\n @property\n def name(self):\n return self.periodic_task.name if self.periodic_task else None\n\n @property\n def description(self):\n return self.periodic_task.description if self.periodic_task else None\n\n @property\n def enabled(self):\n return self.periodic_task.enabled if self.periodic_task else None\n\n @property\n def interval(self):\n return self.periodic_task.interval if self.periodic_task else None\n\n @property\n def crontab(self):\n return self.periodic_task.crontab if self.periodic_task else None\n\n @property\n def expires(self):\n return self.periodic_task.expires if self.periodic_task else None\n\n @property\n def last_run_at(self):\n return self.periodic_task.last_run_at if self.periodic_task else None\n\n @property\n def run_count(self):\n return self.jobs.count()\n\n @property\n def run_success(self):\n return self.jobs.filter(rc=0).count()\n\n @property\n def run_failures(self):\n return self.jobs.exclude(rc=0).count()\n\n @property\n def run_success_average(self):\n total = self.run_count\n return 100 * self.run_success / float(total) if total else 0\n\n @property\n def run_failure_average(self):\n total = self.run_count\n return 100 * self.run_failures / float(total) if total else 0\n\n @property\n def executions(self):\n return self.jobs.all().order_by('-start')\n\n @property\n def serialized(self):\n def obj2dict(obj):\n if obj is None:\n return {}\n result = {}\n for k, v in obj.__dict__.items():\n if k.startswith('_'):\n continue\n is_date = isinstance(v, datetime.datetime)\n result[k] = str(v) if is_date else v\n return result\n\n data = obj2dict(self.periodic_task)\n data.update(obj2dict(self))\n\n return json.dumps(data)\n\n def __unicode__(self):\n return str(self.description or self.name)\n\n @classmethod\n def add_crontab_line(cls, responsible, script,\n minute, hour, dow, dom, mon, name=None):\n crontab, _ = djcelery_models.CrontabSchedule.objects.get_or_create(\n minute=minute,\n hour=hour,\n day_of_week=dow,\n day_of_month=dom,\n month_of_year=mon,\n )\n name = name or 'Autogenerated on {0}'.format(timezone.now())\n\n job, created = cls.objects.get_or_create(\n script=script,\n deleted=False,\n defaults={'owner': responsible}\n )\n\n if not created:\n LOGGER.warning(\"The job already exists and won't be overriden\")\n return False\n\n periodic_task = djcelery_models.PeriodicTask(\n name=name,\n task='Cron worker',\n crontab=crontab,\n kwargs=json.dumps({'job_id': job.id}),\n )\n\n periodic_task.save()\n job.periodic_task = periodic_task\n job.save()\n return True\n\n def delete(self):\n LOGGER.info('Marking job \"%s\" as deleted', self)\n\n self.periodic_task.enabled = False\n self.periodic_task.save()\n\n self.deleted = True\n self.save()\n\n def 
purge(self):\n LOGGER.warning('Removing job \"%s\" definitelly', self)\n # TO BE DONE\n return\n # if instance.periodic_task:\n # instance.periodic_task.delete()\n # log.save()\n\n\nclass Execution(models.Model):\n STATUS_LAUNCHED = 'l'\n STATUS_SUCCESS = 's'\n STATUS_ERROR = 'e'\n\n STATUS_CHOICES = (\n (STATUS_LAUNCHED, _('waiting executor')),\n (STATUS_SUCCESS, _('finished')),\n (STATUS_ERROR, _('error')),\n )\n\n job = models.ForeignKey(Job, related_name='jobs')\n\n status = models.CharField(max_length=1, choices=STATUS_CHOICES)\n flow_id = models.CharField(max_length=100)\n\n rc = models.IntegerField(null=True, blank=True)\n stdout = models.TextField(null=True, blank=True)\n stderr = models.TextField(null=True, blank=True)\n\n created = models.DateTimeField(auto_now_add=True)\n start = models.DateTimeField(null=True, blank=True)\n end = models.DateTimeField(null=True, blank=True)\n\n host = models.ForeignKey(Host, related_name='executions',\n null=True, blank=True)\n\n @property\n def elapsed(self):\n return (self.end - self.start).total_seconds() if self.end else ''\n\n @property\n def queued(self):\n return (self.start - self.created).total_seconds() if self.end else ''\n\n @property\n def total_time(self):\n return (self.end - self.created).total_seconds() if self.end else ''\n\n def __unicode__(self):\n return '{host} {start}-{end} {name}'.format(\n name=self.job.name, start=self.start, end=self.end, host=self.host)\n\n def __str__(self):\n return self.__unicode__()\n\n\nclass Log(models.Model):\n ACTION_CREATE = 'C'\n ACTION_UPDATE = 'U'\n ACTION_DELETE = 'D'\n ACTIONS = (\n (ACTION_CREATE, 'Create'),\n (ACTION_UPDATE, 'Update'),\n (ACTION_DELETE, 'Delete'),\n )\n\n responsible = models.ForeignKey(settings.AUTH_USER_MODEL)\n action = models.CharField(max_length=1, choices=ACTIONS)\n job = models.ForeignKey(Job, related_name='logs')\n\n change = models.TextField(null=True, blank=True)\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n @property\n def change_obj(self):\n obj = self._change_obj\n crontab_id = obj.get('crontab_id')\n interval_id = obj.get('interval_id')\n\n keys = (\n 'enabled', 'deleted', 'name', 'description', 'expires',\n 'queue',\n )\n\n for k in keys:\n yield k.capitalize(), obj.get(k)\n\n if crontab_id is not None:\n crontabmodel = djcelery_models.CrontabSchedule\n crontab = crontabmodel.objects.get(id=crontab_id)\n yield 'Crontab', crontab\n if interval_id is not None:\n intervalmodel = djcelery_models.IntervalSchedule\n interval = intervalmodel.objects.get(id=interval_id)\n yield 'Interval', interval\n\n @property\n def script(self):\n return self._change_obj.get('script')\n\n @property\n def _change_obj(self):\n try:\n return json.loads(self.change)\n except Exception:\n LOGGER.exception('Parsing log changes')\n return {}\n\n def __unicode__(self):\n return (\n '{user} {action}d job {job} at {updated}'\n .format(\n user=self.responsible,\n action=self.get_action_display().lower(),\n job=self.job,\n updated=self.updated or self.created,\n )\n )\n\n def __str__(self):\n return self.__unicode__()\n\n\n@receiver(post_save, sender=Job)\ndef post_save_job(instance, created, **kwargs):\n if created:\n action = Log.ACTION_CREATE\n elif instance.deleted:\n action = Log.ACTION_DELETE\n else:\n action = Log.ACTION_UPDATE\n\n log = Log(\n responsible=instance.owner,\n action=action,\n job=instance,\n change=instance.serialized,\n )\n 
log.save()\n","sub_path":"djcron_server/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"316543690","text":"from __future__ import print_function\nimport os\nimport sys\nimport logging\nimport argparse\nimport time\nfrom time import strftime\nimport torch\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nfrom models.resnet_1d import ResNet18_1d, ResNet34_1d, ResNet50_1d\n\nimport admm\nfrom admm import GradualWarmupScheduler\nfrom admm import CrossEntropyLossMaybeSmooth\nfrom admm import mixup_data, mixup_criterion\nfrom testers import *\n\nfrom TrainValTest import TrainValTest\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef check_and_create(dir_path):\n if os.path.exists(dir_path):\n return True\n else:\n os.makedirs(dir_path)\n return False\n \n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 admm training')\nparser.add_argument('--logger', action='store_true', default=True,\n help='whether to use logger')\nparser.add_argument('--arch', type=str, default=None,\n help='[vgg, resnet, convnet, alexnet]')\nparser.add_argument('--depth', default=None, type=int,\n help='depth of the neural network, 16,19 for vgg; 18, 50 for resnet')\nparser.add_argument('--s', type=float, default=0.0001,\n help='scale sparse rate (default: 0.0001)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--multi-gpu', action='store_true', default=False,\n help='for multi-gpu training')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=256, metavar='N',\n help='input batch size for testing (default: 256)')\nparser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train (default: 160)')\nparser.add_argument('--admm-epochs', type=int, default=1, metavar='N',\n help='number of interval epochs to update admm (default: 1)')\nparser.add_argument('--optmzr', type=str, default='sgd', metavar='OPTMZR',\n help='optimizer used (default: adam)')\nparser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.1)')\nparser.add_argument('--lr-decay', type=int, default=30, metavar='LR_decay',\n help='how many every epoch before lr drop (default: 30)')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--load-model', type=str, default=\"\",\n help='For loading the exist Model')\nparser.add_argument('--load-mask', type=str, default=\"\",\n help='For loading the exist Model')\nparser.add_argument('--save-model', type=str, default=\"\",\n help='For Saving the current Model')\nparser.add_argument('--masked-retrain', action='store_true', default=False,\n help='for masked retrain')\nparser.add_argument('--verbose', 
action='store_true', default=True,\n help='whether to report admm convergence condition')\nparser.add_argument('--admm', action='store_true', default=False,\n help=\"for admm training\")\nparser.add_argument('--rho', type=float, default = 0.0001,\n help =\"define rho for ADMM\")\nparser.add_argument('--rho-num', type=int, default = 5,\n help =\"define how many rohs for ADMM training\")\nparser.add_argument('--sparsity-type', type=str, default='random-pattern',\n help =\"define sparsity_type: [irregular,column,filter,pattern,random-pattern]\")\nparser.add_argument('--config-file', type=str, default='config_vgg16',\n help =\"config file name\")\nparser.add_argument('--combine-progressive', default=False, type=str2bool,\n help=\"for filter pruning after column pruning\")\nparser.add_argument('--purification', default=False, type=str2bool,\n help=\"purification after pruning\")\n\nparser.add_argument('--lr-scheduler', type=str, default='default',\n help='define lr scheduler')\nparser.add_argument('--warmup', action='store_true', default=False,\n help='warm-up scheduler')\nparser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='M',\n help='warmup-lr, smaller than original lr')\nparser.add_argument('--warmup-epochs', type=int, default=0, metavar='M',\n help='number of epochs for lr warmup')\nparser.add_argument('--mixup', action='store_true', default=False,\n help='ce mixup')\nparser.add_argument('--alpha', type=float, default=0.0, metavar='M',\n help='for mixup training, lambda = Beta(alpha, alpha) distribution. Set to 0.0 to disable')\nparser.add_argument('--smooth', action='store_true', default=False,\n help='lable smooth')\nparser.add_argument('--smooth-eps', type=float, default=0.0, metavar='M',\n help='smoothing rate [0.0, 1.0], set to 0.0 to disable')\nparser.add_argument('--no-tricks', action='store_true', default=False,\n help='disable all training tricks and restore original classic training process')\n\n########### From RFMLS: multi-gpu; batch-size\nparser.add_argument('--exp_name', default='exp1', type=str, help='Specify the experiment name')\nparser.add_argument('--base_path', default='/scratch/RFMLS/dataset100/dataset_with_val_9000train/', type=str, help='Specify the base path')\nparser.add_argument('--save_path', default='/scratch/zhou.fan1/filtered/', type=str, help='Specify the save path')\nparser.add_argument('--file_type', default='mat', type=str, help='Specify type of file you want to read')\nparser.add_argument('--decimated', default=False, type=str2bool, help='Specify if the data in the files is decimated, if so and you are using the same stats file as the undecimated then the generator will take this into account')\nparser.add_argument('--val_from_train', default=False, type=str2bool, help='If validation not present in partition file, generate one from the training set. 
(If false, use test set as validation)')\nparser.add_argument('--test_on_val_set',default=False, type=str2bool, help='If true it will test the trained model on validation data, for tuning hyperparameters')\nparser.add_argument('--train', default=False, type=str2bool, help='Specify doing training or not')\n \nparser.add_argument('-ss', '--slice_size', default=1024, type=int, help='Specify the slice size')\nparser.add_argument('-d', '--devices', default=100, type=int, help='Specify the number of total devices')\nparser.add_argument('--cnn_stack', default=3, type=int, help='[Baseline Model] Specify the number of cnn layers')\nparser.add_argument('--fc_stack', default=2, type=int, help='[Baseline Model] Specify the number of fc layers')\nparser.add_argument('--channels', default=128, type=int, help='[Baseline Model] Specify the number of channels of cnn')\nparser.add_argument('--fc1', default=256, type=int, help='[Baseline Model] Specify the number of neurons in the first fc layer')\nparser.add_argument('--fc2', default=128, type=int, help='[Baseline Model] Specify the number of neurons in the penultimate fc layer')\n\n# Data Generator \nparser.add_argument('--generator', default='new', type=str, help='Specify which generator to use')\nparser.add_argument('--add_padding', default=False, type=str2bool, help='If examples are smaller than slice size addpadding')\nparser.add_argument('--padding_type', default='zero', type=str, help='\"zero\"-padding and \"stride\"-padding')\nparser.add_argument('--try_concat', default=False, type=str2bool, help='If examples are smaller than slice size and using demodulated data, try and concat them')\nparser.add_argument('--preprocessor', default='no', type=str, help='Specify which preprocessor to use')\nparser.add_argument('--K', default=1, type=int, help='Specify the batch down sampling factor K')\nparser.add_argument('-fpio', '--files_per_IO', default=500000, type = int, help='Specify the files loaded to memory per IO')\nparser.add_argument('--shrink', default=1, type=float, help='Dataset down sampling factor')\nparser.add_argument('--normalize', default='True', type=str2bool, help='Specify if you want to normalize the data using mean and std in stats files (if stats does not have this info, it is ignored)')\nparser.add_argument('--crop', default=0, type=int, help='if crop > 0 the generator crops the examples to a maximum length of crop')\nparser.add_argument('--training_strategy', default='big', type=str, help='Specify which sampling strategy to use')\nparser.add_argument('--sampling', default='model', type=str, help='Specify which sampling strategy to use')\nparser.add_argument('--fir_size', default=11, type=int, help='FIR filter size.')\nparser.add_argument('--use_preamble', default=False, type=str2bool, help='Using preamble to train channel-removing filter')\nparser.add_argument('--merge_preamble', default=False, type=str2bool, help='Merge preamble with slice to train channel-removing filter')\n\nparser.add_argument('--id_gpu', default=0, type=int, help='If --multigpu=False, this arguments specify which gpu to use.')\nparser.add_argument('--test_stride', default=16, type=int, help='Specify the stride to use for testing')\nparser.add_argument('--per_example_strategy', default='prob_sum', type=str, help='Specify the strategy used to compute the per wxample accuracy: (majority, prob_sum, log_prob_sum, all)')\n\n################## augmentation parameters #####################\nparser.add_argument('--aug_var', default='0.0434', type=float, help='variance of 
noise for data augmentation')\nparser.add_argument('--aug_mean', default='0.045', type=float, help='mean of noise for data augmentation')\nparser.add_argument('--aug_taps', default=11, type=int, help='Number of complex taps for data augmentation')\nparser.add_argument('--aug_granularity', default='per_ex', type=str, help='granularity of fir selection for training pipelinecan be per_ex, per_batch, per_slice')\nargs = parser.parse_args()\n\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\nuse_cuda = not args.no_cuda and torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\nkwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}\nwriter = None\nprint('Use Cuda:',use_cuda)\n# ------------------ save path ----------------------------------------------\nargs.save_path_exp = args.save_path\ncheck_and_create(args.save_path_exp)\nsetting_file = os.path.join(args.save_path_exp, args.exp_name+'.config')\n\nprint(\"*************** Configuration ***************\")\nwith open(setting_file, 'w') as f:\n args_dic = vars(args)\n for arg, value in args_dic.items():\n line = arg + ' : ' + str(value)\n print(line)\n f.write(line+'\\n')\n\n### Data Loader ###\npipeline = TrainValTest(base_path=args.base_path, save_path=args.save_path_exp,\n val_from_train=args.val_from_train)\npipeline.load_data(sampling=args.sampling)\ntrain_loader = pipeline.GenerateData(args.batch_size, args.slice_size, args.K, args.files_per_IO, \n generator_type=args.generator, processor_type=args.preprocessor, \n training_strategy = args.training_strategy,\n file_type=args.file_type, normalize=args.normalize, \n decimated=args.decimated, add_padding=args.add_padding,\n padding_type=args.padding_type, try_concat=args.try_concat,\n crop=args.crop,\n use_preamble=args.use_preamble, aug_var=args.aug_var,\n aug_mean=args.aug_mean, aug_taps=args.aug_taps)\n# set up model archetecture\nif args.arch == \"resnet\":\n if args.depth == 50:\n model = ResNet50_1d(args.slice_size,args.devices)\n if args.depth == 34:\n model = ResNet34_1d(args.slice_size,args.devices)\n if args.depth == 18:\n model = ResNet18_1d(args.slice_size,args.devices)\n print(model)\n \nif args.multi_gpu:\n model = torch.nn.DataParallel(model)\nmodel.cuda()\nif args.load_model:\n original_model_name = args.load_model\n print(\"\\n>_ Loading baseline/progressive model..... 
{}\\n\".format(original_model_name))\n model.load_state_dict(torch.load(original_model_name))\n \nif args.train:\n print('*************** Training Model ***************')\n test_column_sparsity(model)\n optimizer_init_lr = 0.0001\n best_acc = 0\n optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)\n criterion = torch.nn.CrossEntropyLoss()\n for epoch in range(1, 20):\n start = time.time()\n \n #adjust learning rate\n lr = optimizer_init_lr * (0.5 ** (epoch // 3))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n \n model = pipeline.train_model(args, model, train_loader, criterion, optimizer, epoch)\n end_train = time.time()\n acc_slice, acc_ex, preds = pipeline.test_model(args, model)\n end_test = time.time()\n print(\"Training time: {:.3f}; Testing time: {:.3f}; Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}\".format(end_train-start, end_test-end_train, acc_slice, acc_ex))\n\n if acc_ex > best_acc:\n best_acc = acc_ex\n print(\"Saving model...\\n\")\n torch.save(model.state_dict(), args.save_path_exp+\"/{}{}.pt\".format(\n args.arch, args.depth))\n\nelse:\n print('*************** Not Training Model ***************')\n acc_slice, acc_ex, preds = pipeline.test_model(args, model)\n print(\"Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}\".format(acc_slice, acc_ex))\n test_column_sparsity(model)\n test_filter_sparsity(model)\n \n\"\"\" disable all bag of tricks\"\"\"\nif args.no_tricks:\n # disable all trick even if they are set to some value\n args.lr_scheduler = \"default\"\n args.warmup = False\n args.mixup = False\n args.smooth = False\n args.alpha = 0.0\n args.smooth_eps = 0.0\n\n\ndef main():\n if (args.admm and args.masked_retrain):\n raise ValueError(\"can't do both masked retrain and admm\")\n \n elif (not args.admm) and (not args.masked_retrain) and args.purification:\n print(\"Model Purification\")\n post_column_prune(model,0.04)\n post_filter_prune(model,0.23)\n #acc_slice, acc_ex, preds = pipeline.test_model(args,model)\n rate = test_filter_sparsity(model)\n \n print(\"Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}\".format(acc_slice, acc_ex))\n torch.save(model.state_dict(), args.save_path_exp+\"/prunned_{}_{}.pt\".format(acc_ex, rate))\n sys.exit(1)\n \n \n print(\"The config arguments showed as below:\")\n print(args)\n\n \"\"\" bag of tricks set-ups\"\"\"\n criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()\n args.smooth = args.smooth_eps > 0.0\n args.mixup = args.alpha > 0.0\n\n optimizer_init_lr = args.warmup_lr if args.warmup else args.lr\n\n optimizer = None\n if args.optmzr == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)\n elif args.optmzr == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)\n\n scheduler = None\n if args.lr_scheduler == 'cosine':\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader), eta_min=4e-08)\n elif args.lr_scheduler == 'default':\n # my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar\n epoch_milestones = [65, 100, 130, 190, 220, 250, 280]\n\n \"\"\"Set the learning rate of each parameter group to the initial lr decayed\n by gamma once the number of epoch reaches one of the milestones\n \"\"\"\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * len(train_loader) for i in epoch_milestones], gamma=0.5)\n else:\n raise Exception(\"unknown lr scheduler\")\n\n if args.warmup:\n scheduler 
= GradualWarmupScheduler(optimizer, multiplier=args.lr / args.warmup_lr, total_iter=args.warmup_epochs * len(train_loader), after_scheduler=scheduler)\n\n\n \"\"\"=====================\"\"\"\n \"\"\" multi-rho admm train\"\"\"\n \"\"\"=====================\"\"\"\n initial_rho = args.rho\n if args.admm:\n admm_prune(initial_rho, criterion, optimizer, scheduler)\n\n\n \"\"\"==============\"\"\"\n \"\"\"masked retrain\"\"\"\n \"\"\"==============\"\"\"\n if args.masked_retrain:\n masked_retrain(initial_rho, criterion, optimizer, scheduler)\n \n \n\n\ndef admm_prune(initial_rho, criterion, optimizer, scheduler):\n for i in range(args.rho_num):\n current_rho = initial_rho * 10 ** i\n if i == 0:\n original_model_name = args.load_model\n print(\"\\n>_ Loading baseline/progressive model..... {}\\n\".format(original_model_name))\n model.load_state_dict(torch.load(original_model_name)) # admm train need basline model\n else:\n model.load_state_dict(torch.load(args.save_path_exp+\"/prunned_{}{}_{}_{}_{}_{}.pt\".format(\n args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))\n model.cuda()\n\n ADMM = admm.ADMM(model, file_name=\"./profile/\" + args.config_file + \".yaml\", rho=current_rho)\n admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable\n\n # admm train\n best_prec1 = 0.\n\n for epoch in range(1, args.epochs + 1):\n print(\"current rho: {}\".format(current_rho))\n train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)\n #t_loss, prec1 = test(model, criterion, test_loader)\n acc_slice, acc_ex, preds = pipeline.test_model(args,model)\n print(\"Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}\".format(acc_slice, acc_ex))\n \n best_prec1 = max(acc_ex, best_prec1)\n\n print(\"Best Acc: {:.4f}%\".format(best_prec1))\n print(\"Saving model...\\n\")\n torch.save(model.state_dict(), args.save_path_exp+\"/prunned_{}{}_{}_{}_{}_{}.pt\".format(\n args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))\n\n\n\ndef masked_retrain(initial_rho, criterion, optimizer, scheduler):\n \n if args.load_mask:\n '''\n Load pre-mask and added to the full model\n '''\n print(\"\\n>_ Loading Mask: \"+ args.load_mask)\n mask = torch.load(args.load_mask)\n for name, W in (model.named_parameters()):\n if name in mask and W.shape==mask[name].shape:\n weight = mask[name].cpu().detach().numpy()\n non_zeros = weight != 0\n non_zeros = non_zeros.astype(np.float32)\n zero_mask = torch.from_numpy(non_zeros).cuda()\n W.data *= zero_mask\n test_column_sparsity(model)\n \n else:\n print(\"\\n>_ Loading file: \"+args.save_path_exp+\"/prunned_{}{}_{}_{}_{}_{}.pt\".format(\n args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,\n args.sparsity_type))\n model.load_state_dict(torch.load(args.save_path_exp+\"/prunned_{}{}_{}_{}_{}_{}.pt\".format(\n args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,\n args.sparsity_type)))\n model.cuda()\n \n ADMM = admm.ADMM(model, file_name=\"./profile/\" + args.config_file + \".yaml\", rho=initial_rho)\n print(ADMM.prune_ratios)\n best_prec1 = [0]\n admm.hard_prune(args, ADMM, model)\n epoch_loss_dict = {}\n testAcc = []\n\n for epoch in range(1, args.epochs + 1):\n idx_loss_dict = train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)\n acc_slice, prec1, preds = pipeline.test_model(args,model)\n print(\"Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}\".format(acc_slice, prec1))\n #rate = 
test_filter_sparsity(model)\n #t_loss, prec1 = test(model, criterion, test_loader)\n \n if prec1 > max(best_prec1):\n print(\"\\n>_ Got better accuracy, saving model with accuracy {:.3f}% now...\\n\".format(prec1))\n torch.save(model.state_dict(), args.save_path_exp+\"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt\".format(\n args.arch, args.depth, prec1, args.rho_num, args.config_file, args.sparsity_type))\n print(\"\\n>_ Deleting previous model file with accuracy {:.3f}% now...\\n\".format(max(best_prec1)))\n #if len(best_prec1) > 1:\n # os.remove(args.save_path_exp+\"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt\".format(\n # args.arch, args.depth, max(best_prec1), args.rho_num, args.config_file, args.sparsity_type))\n\n epoch_loss_dict[epoch] = idx_loss_dict\n testAcc.append(prec1)\n\n best_prec1.append(prec1)\n print(\"current best acc is: {:.4f}\".format(max(best_prec1)))\n\n rate = test_column_sparsity(model)\n rate = test_filter_sparsity(model)\n\n print(\"Best Acc: {:.4f}%\".format(max(best_prec1)))\n #np.save(strftime(\"./plotable/%m-%d-%Y-%H:%M_plotable_{}.npy\".format(args.sparsity_type)), epoch_loss_dict)\n #np.save(strftime(\"./plotable/%m-%d-%Y-%H:%M_testAcc_{}.npy\".format(args.sparsity_type)), testAcc)\n\n\ndef train(ADMM, train_loader,criterion, optimizer, scheduler, epoch, args):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n idx_loss_dict = {}\n\n # switch to train mode\n model.train()\n\n if args.masked_retrain and not args.combine_progressive:\n print(\"full acc re-train masking\")\n masks = {}\n for name, W in (model.named_parameters()):\n # if name not in ADMM.prune_ratios:\n # continue\n # above_threshold, W = admm.weight_pruning(args, W, ADMM.prune_ratios[name])\n # W.data = W\n # masks[name] = above_threshold\n weight = W.cpu().detach().numpy()\n non_zeros = weight != 0\n non_zeros = non_zeros.astype(np.float32)\n zero_mask = torch.from_numpy(non_zeros).cuda()\n W = torch.from_numpy(weight).cuda()\n W.data = W\n masks[name] = zero_mask\n elif args.combine_progressive:\n print(\"progressive admm-train/re-train masking\")\n masks = {}\n for name, W in (model.named_parameters()):\n weight = W.cpu().detach().numpy()\n non_zeros = weight != 0\n non_zeros = non_zeros.astype(np.float32)\n zero_mask = torch.from_numpy(non_zeros).cuda()\n W = torch.from_numpy(weight).cuda()\n W.data = W\n masks[name] = zero_mask\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # adjust learning rate\n if args.admm:\n admm.admm_adjust_learning_rate(optimizer, epoch, args)\n else:\n scheduler.step()\n\n input=input.float()\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n if args.mixup:\n input, target_a, target_b, lam = mixup_data(input, target, args.alpha)\n\n # compute output\n output = model(input)\n\n if args.mixup:\n ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, args.smooth)\n else:\n ce_loss = criterion(output, target, smooth=args.smooth)\n\n if args.admm:\n admm.z_u_update(args, ADMM, model, device, train_loader, optimizer, epoch, input, i, writer) # update Z and U variables\n ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss) # append admm losss\n\n # measure accuracy and record loss\n acc1,_ = accuracy(output, target, topk=(1,5))\n\n losses.update(ce_loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n\n # compute gradient 
and do SGD step\n optimizer.zero_grad()\n\n if args.admm:\n mixed_loss.backward()\n else:\n ce_loss.backward()\n\n if args.combine_progressive:\n with torch.no_grad():\n for name, W in (model.named_parameters()):\n if name in masks:\n W.grad *= masks[name]\n if args.masked_retrain:\n with torch.no_grad():\n for name, W in (model.named_parameters()):\n if name in masks:\n W.grad *= masks[name]\n\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print(i)\n if i % args.log_interval == 0:\n for param_group in optimizer.param_groups:\n current_lr = param_group['lr']\n print('({0}) lr:[{1:.5f}] '\n 'Epoch: [{2}][{3}/{4}]\\t'\n 'Status: admm-[{5}] retrain-[{6}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f}% ({top1.avg:.3f}%)\\t'\n .format(args.optmzr, current_lr,\n epoch, i, len(train_loader), args.admm, args.masked_retrain, batch_time=batch_time, loss=losses, top1=top1))\n if i % 100 == 0:\n idx_loss_dict[i] = losses.avg\n return idx_loss_dict\n\n\n\ndef test(model, criterion, test_loader):\n model.eval()\n losses = AverageMeter()\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.cuda(), target.cuda()\n output = model(data)\n loss = criterion(output, target)\n losses.update(loss.item(), data.size(0))\n # test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n print('\\nTest set loss: {:.4f}, * Acc@1: {}/{} ({:.2f}%)\\n'.format(\n losses.avg, correct, len(test_loader.dataset),\n 100. * float(correct) / float(len(test_loader.dataset))))\n return losses.avg, (100. 
* float(correct) / float(len(test_loader.dataset)))\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\ndef convert_secs2time(epoch_time):\n need_hour = int(epoch_time / 3600)\n need_mins = int((epoch_time - 3600*need_hour) / 60)\n need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)\n return need_hour, need_mins, need_secs\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.3 ** (epoch // args.lr_decay))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\nif __name__ == '__main__':\n start_time = time.time()\n main()\n duration = time.time() - start_time\n need_hour, need_mins, need_secs = convert_secs2time(duration)\n print('total runtime: {:02d}:{:02d}:{:02d}'.format(need_hour, need_mins, need_secs))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":29216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
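The retraining path in main.py's train() freezes pruned weights by building a 0/1 mask from each parameter's current zero pattern and multiplying it into the gradients after backward(), so optimizer.step() cannot revive pruned connections. A minimal CPU-only sketch of that technique (the toy nn.Linear model is a stand-in; the real script runs on CUDA):

```python
import torch
import torch.nn as nn

# toy model standing in for the pruned network (assumption: any nn.Module works)
model = nn.Linear(4, 2)
with torch.no_grad():
    model.weight[0, :2] = 0.0  # pretend these weights were pruned away

# build binary masks from the zero pattern, as train() above does
masks = {name: (W != 0).float() for name, W in model.named_parameters()}

x, y = torch.randn(8, 4), torch.randn(8, 2)
loss = nn.functional.mse_loss(model(x), y)
loss.backward()

# zero the gradients of pruned weights so the update leaves them at zero
with torch.no_grad():
    for name, W in model.named_parameters():
        W.grad *= masks[name]

assert torch.all(model.weight.grad[0, :2] == 0)
```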
+{"seq_id":"279961944","text":"import numpy as np\nfrom vicon_anim_parser.character import Joint, JointHinge, \\\nJointBall, JointFree, JointUniversal, Transform\n\ndef create_joint(xml_el, current_id, parent_id):\n joint, joint_el = _create_specific_joint(xml_el, current_id, parent_id)\n _build_common_all(joint, joint_el)\n _build_segment_name(joint, xml_el)\n return joint\n\n\ndef _create_specific_joint(xml_el, current_id, parent_id):\n joint_tags = {\n \"JointFree\": (JointFree, None),\n \"JointBall\": (JointBall, None),\n \"JointHinge\": (JointHinge, \"AXIS\"),\n \"JointHardySpicer\": (JointUniversal, \"AXIS-PAIR\"),\n \"JointDummy\": (Joint, None)\n }\n\n def is_found(joint_element):\n return joint_element is not None\n\n def parse_joint(tag_name, joint_element):\n joint_constructor, attr_name = joint_tags[tag_name]\n joint = joint_constructor(current_id, parent_id)\n\n if attr_name is not None:\n params = joint_element.get(attr_name).strip().split()\n joint.store_params(*map(float, params))\n\n return joint\n\n joint_element = None\n joint = None\n\n for tag_name in joint_tags:\n joint_element = xml_el.find(tag_name)\n if is_found(joint_element):\n joint = parse_joint(tag_name, joint_element)\n break\n\n if joint is None:\n raise Exception(\"joint {0} of segment {1} has unknown type\".format(current_id, xml_el.get(\"NAME\")))\n\n return joint, joint_element\n\ndef _build_common_all(joint, joint_el):\n \"\"\"\n from here:\n https://github.com/jslee02/vsk/tree/master/docs\n\n The orientation is specified as a helical vector. The\n direction of this vector gives the direction of the axis.\n The magnitude of this vector gives the amount of\n rotation around that axis in radians.\n \"\"\"\n\n transform = Transform()\n translation = joint_el.get(\"PRE-POSITION\").strip().split()\n translation = np.array(map(float, translation))\n transform.translation = translation\n\n #epxonential mapping -> axis-angle pair\n angles = joint_el.get(\"PRE-ORIENTATION\").strip().split()\n\n axis = np.array(map(float, angles))\n magnitude = np.linalg.norm(axis)\n if abs(magnitude) < 0.0001:\n #magnitude = angle is too small -> rotation is just identity matrix\n transform.rotation = np.eye(3)\n else:\n axis = axis / magnitude\n transform.rotation = Transform.rotate_around_rad(axis, magnitude)\n\n joint.transform = transform\n joint.name = joint_el.get(\"NAME\")\n\ndef _build_segment_name(joint, xml_el):\n joint.segment_name = xml_el.get(\"NAME\").strip()","sub_path":"vicon_anim_parser/jointVSKFactory.py","file_name":"jointVSKFactory.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"73143244","text":"# First of all we are importing the socket library\nimport socket\n\n# Next create a socket object\ns = socket.socket()\nprint(\"Socket successfully created\")\n\n#reserve a port number\nport = 1456\n\n#bind to the port\ns.bind(('',port))\nprint(\"Socket binded to %s\" %(port))\n\n#put the socket into listenting mode\ns.listen(5)\nprint(\"socket is now listening\")\n\n# a forever loop until we exit\n# or an error occurs\n\nwhile True:\n # Establish connection with client\n c, addr = s.accept()\n print(\"Got connection form\", addr)","sub_path":"server_1.py","file_name":"server_1.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"314103460","text":"from flask import request, render_template, redirect\nfrom senioritis import app, basic_auth, getvalidtags, dbthing\n\n\n@app.route(\"/admin\")\n@basic_auth.required\ndef admin():\n return 'k.'\n\n\n@app.route(\"/admin/thelonelyandtagless/\")\n@app.route(\"/admin/thelonelyandtagless/\")\n@basic_auth.required\n@dbthing\ndef untagged(c, num=15):\n c.execute(\"SELECT ID, name FROM files where ID NOT IN (SELECT file_id from tags)\")\n # c.execute(\"SELECT ID, name FROM files\")\n res = [i for i in c]\n names = [i[1] for i in res]\n ids = [i[0] for i in res]\n return render_template('taggableimagegrid.html', ress=names[:num], ids=ids)\n\n\n@app.route(\"/admin/tagimage/\")\n@basic_auth.required\n@dbthing\ndef tagimage(c, image_id):\n c.execute(\"SELECT name FROM files WHERE ID=%s\", (image_id,))\n image_url = [\"https://s3.amazonaws.com/kyle-picture-bucket/Pictures/\" + i[0].replace(\" \", \"+\") for i in c][0]\n c.execute(\"SELECT name FROM tags WHERE file_id=%s\", (image_id,))\n selected = [i[0] for i in c]\n return render_template('singleimage.html', img_url=image_url, selected=selected, ID=image_id, tags=getvalidtags())\n\n\n@app.route(\"/admin/tagsubmit/\", methods=[\"POST\"])\n@basic_auth.required\n@dbthing\ndef tagsubmit(c, image_id):\n newtags = request.form.getlist(\"tags\")\n c.execute(\"DELETE FROM tags WHERE file_id=%s\", (image_id,))\n c.executemany(\"INSERT INTO tags VALUES (%s, %s)\", list(zip([image_id] * len(newtags), newtags)))\n # conn.commit()\n return redirect(\"/admin/tagimage/\" + image_id)\n\n\n@app.route(\"/admin/deleteimage/\")\n@basic_auth.required\n@dbthing\ndef deleteimage(c, image_id):\n c.execute(\"DELETE FROM files WHERE ID=%s\", (image_id,))\n return \"Okay? Okay.\"\n\n\n@app.route(\"/admin/tageditor\")\n@basic_auth.required\ndef tageditor():\n return render_template(\"tageditor.html\")\n\n\n@app.route(\"/admin/addnewtag\", methods=[\"POST\"])\n@basic_auth.required\n@dbthing\ndef addnewtag(c):\n tagname = request.form[\"tagname\"]\n c.execute(\"INSERT INTO all_tags VALUES (%s)\", (tagname,))\n # conn.commit()\n return \"You got it, boss\"\n\n\n@app.route(\"/admin/delbyfilename/\")\n@basic_auth.required\n@dbthing\ndef delbyfilename(c, filename):\n a = c.execute(\"DELETE FROM files WHERE name=(%s)\", (filename,))\n return \"Kay\" + a\n","sub_path":"senioritis/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"533293708","text":"\n\nfrom typing import List\n\nclass TwoFairDice:\n\n MIN, MAX, PIVOT = 0, 1000, 50\n\n\n def get_d(self, A, v):\n d = 0\n for a in A:\n if v > a: d -= 1\n if v < a: d += 1\n return d\n\n def finish(self, A:List[int], B:List[int]):\n A2 = sorted(A)\n\n d: int = self.PIVOT\n for b in B:\n d += self.get_d(A2, b)\n print('d=', d)\n\n dp = [[0]*(self.PIVOT*2) for _ in range(len(A)+1)]\n dp[len(B)][d] = 1\n\n for i in range(len(B), len(A)):\n\n for j in range(0, len(A)):\n if j == 0 or A2[j] > A2[j-1]: # lt\n d = self.get_d(A2, A2[j] - 1)\n ways = A2[j] - A2[j-1] - 1 if j != 0 else A2[j]\n for d0 in range(0, self.PIVOT * 2):\n d2 = d0 + d\n if 0 <= d2 < self.PIVOT * 2:\n dp[i+1][d2] += dp[i][d0] * ways\n if j == 0 or A2[j] > A2[j-1]: # eq\n d = self.get_d(A2, A2[j])\n ways = 1\n for d0 in range(0, self.PIVOT * 2):\n d2 = d0 + d\n if 0 <= d2 < self.PIVOT * 2:\n dp[i+1][d2] += dp[i][d0] * ways\n if j == len(A)-1: # gt\n d = self.get_d(A2, A2[j] + 1)\n ways = self.MAX - A2[j]\n for d0 in range(0, self.PIVOT * 2):\n d2 = d0 + d\n if 0 <= d2 < self.PIVOT * 2:\n dp[i + 1][d2] += dp[i][d0] * ways\n print(dp[i+1])\n return dp[len(A)][self.PIVOT]\n\n\n\nif __name__ == '__main__':\n print(TwoFairDice().finish(A=[1, 2 ,4, 3, 5, 6], B=[3, 2, 1, 4, 6]))\n print(TwoFairDice().finish(A=[10, 10, 11, 12, 12, 12, ], B=[11, 12, 12]))\n print(TwoFairDice().finish(A=[50, 50, 50, 50, 50, 50, ], B=[]))\n","sub_path":"apps/daily_coding/topcoder/SRM826Div2-1000.py","file_name":"SRM826Div2-1000.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"521286871","text":"import glob\nimport json\nimport os\nimport shutil\nimport subprocess\n\nimport arrow\n\nROOTDIR = os.path.abspath(os.path.dirname(__file__))\n\nvideo_formats = [\n \".3gp\",\n \".avi\",\n \".m2ts\",\n \".m4v\",\n \".mov\",\n \".mp4\",\n \".mpg\",\n \".wmv\",\n] # '.m2ts', '.avi' cannot be written by exiftool yet\nimage_formats = [\".gif\", \".heic\", \".jpeg\", \".jpg\", \".png\", \".tiff\", \".tif\"]\n\n\n# ['.3gp', '.avi', '.gif', '.heic', '.html', '.jpeg', '.jpg', '.json', '.m2ts', '.m4v', '.mov', '.mp4', '.mpg', '.png', '.tiff']\n\n\ndef download_archives():\n # TODO: find a way to do it automatically ¯\\_(ツ)_/¯\n pass\n\n\ndef verify_archives(tag, total, compression):\n for i in range(1, total + 1):\n archive = f\"takeout-{tag}-{i:03d}.{compression}\"\n if compression == \"zip\":\n command = f\"unzip -qt {archive}\"\n elif compression == \"tgz\":\n command = f\"tar tfz {archive}\"\n else:\n return\n print(archive, os.path.getsize(archive))\n subprocess.check_call(command.split())\n\n\ndef extract_archives(compression):\n for i, archive in enumerate(sorted(glob.glob(f\"takeout*.{compression}\"))):\n targetdir = os.path.splitext(archive)[0]\n if not os.path.exists(targetdir):\n if compression == \"zip\":\n # command = f\"unzip -q -d {targetdir} {archive}\"\n # print(command)\n # subprocess.check_call(command.split())\n # https://github.com/adamhathcock/sharpcompress/issues/315#issuecomment-409894957\n # https://github.com/CocoaPods/CocoaPods/issues/7711#issuecomment-386942543\n command = f\"ditto -V -x -k --sequesterRsrc --rsrc {archive} {targetdir}\"\n elif compression == \"tgz\":\n os.mkdir(f\"{targetdir}\")\n command = f\"tar xfz {archive} -C {targetdir}\"\n else:\n return\n print(command)\n subprocess.check_call(\n command.split()\n ) # , stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n\ndef find_extensions():\n extensions = set()\n for root, dirs, files in os.walk(\".\"):\n for name in files:\n extension = os.path.splitext(name)[1]\n if extension == \".zip\" or extension == \"\":\n continue\n extensions.add(extension.lower())\n print(sorted(extensions))\n\n\ndef move_subfolders_into_batches(dir):\n os.chdir(dir)\n dircontents = os.listdir(\".\")\n dircontents = [d for d in dircontents if not d.startswith(\"batch_\")]\n batchnumber = 14\n batchsize = int(len(dircontents) / batchnumber) + 1\n for i in range(batchnumber):\n batchdir = f\"batch_{i + 1:03d}\"\n s = slice(i * batchsize, (i + 1) * batchsize - 1)\n print(batchdir, s)\n if not os.path.exists(batchdir):\n os.mkdir(batchdir)\n for item in dircontents[s]:\n shutil.move(item, batchdir)\n print(\"-\" * 50)\n\n\nif __name__ == \"__main__\":\n DATA_DIR = \"/Volumes/Photos/frank/\"\n os.chdir(DATA_DIR)\n # download_archives()\n # verify_archives(\"20201123T165802Z\", 32, \"tgz\")\n extract_archives(\"zip\")\n # find_extensions()\n # move_subfolders_into_batches('Google Photos')\n","sub_path":"google_takeout_util.py","file_name":"google_takeout_util.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"355608667","text":"import argparse\nfrom collections import defaultdict\nfrom tokenizer import tokenize\nfrom tokenizer import compute_word_frequencies\nfrom tokenizer import print_sorted_order\n\ndef combine_frequency_map(frequency_map1, frequency_map2):\n map = defaultdict(int)\n for k, v in frequency_map1.items():\n map[k] = v\n for k, v in frequency_map2.items():\n if k in map: map[k]+=v\n else: map[k]=v\n return map\n\ndef read_map(filepath):\n map = defaultdict(int)\n with open(filepath) as f:\n for line in f:\n k, v = line.split(',')\n k, v = k.strip(), int(v.strip())\n if k in map: map[k]+=v\n else: map[k]=v\n return map\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"wordmap1\", help=\"path of the word map to compare\")\n parser.add_argument(\"wordmap2\", help=\"path of the word map to compare\")\n parser.add_argument(\"--output_file\", help=\"path of the output file\")\n args = parser.parse_args()\n filepath_1 = args.wordmap1\n filepath_2 = args.wordmap2\n word_frequencies_1 = read_map(filepath_1)\n word_frequencies_2 = read_map(filepath_2)\n word_frequency = combine_frequency_map(word_frequencies_1, word_frequencies_2)\n if args.output_file:\n print_sorted_order(word_frequency, output_file_name=args.output_file)\n else:\n print_sorted_order(word_frequency)\n\nif __name__=='__main__':\n main()","sub_path":"odyssey/compare_freq_map.py","file_name":"compare_freq_map.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"167044115","text":"a = [5,95]\nprint(a)\ndef partup(a,lo,hi):\n pivot = a[lo]\n pivot_i = lo\n for i in range(lo+1,hi+1):\n if (a[i] <= pivot):\n temp = a[i]\n a[i] = a[pivot_i+1]\n a[pivot_i+1] = temp\n pivot_i += 1\n temp = a[pivot_i]\n a[pivot_i] = pivot\n a[lo] = temp\n return pivot_i\n#print(partup(a,0,len(a)-1))\n\ndef qsort(a,lo,hi):\n if lo < hi:\n g = partup(a,lo,hi-1)\n qsort(a,lo,g)\n qsort(a,g+1,hi)\n return a\n\nprint(qsort(a,0,len(a)))\n","sub_path":"PythonProjects/python_basics/qsort.py","file_name":"qsort.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"649340254","text":"import os\nimport requests\nimport statistics\nfrom datetime import datetime, timedelta\n\nclass Geocode:\n\n def __init__(self, google_apikey):\n self.apikey = google_apikey\n self.baseurl = r'https://maps.googleapis.com/maps/api/geocode'\n\n def get(self, city):\n locations = []\n url = self._make_url(city)\n r = requests.get(url)\n if r.status_code == 200:\n data = r.json()\n if data.get('status', '') == 'OK':\n results = data.get('results', [])\n locations = self._parse_locations(results)\n return locations\n\n\n def _make_url(self, address):\n url = r'{}/json?address={}&key={}'.format(self.baseurl, address, self.apikey)\n return url\n\n def _parse_locations(self, dataset):\n locations = []\n for data in dataset:\n address = data.get('formatted_address', None)\n geodata = data.get('geometry', {})\n locdata = geodata.get('location', {})\n lat = locdata.get('lat', '')\n lng = locdata.get('lng', '')\n\n if address != '' and lat != '' and lng != '':\n locations.append((address, lat, lng))\n return locations\n\n\nclass WeatherStats:\n\n def __init__(self):\n self.clear()\n\n def clear(self):\n self._stats = {}\n self._temps = []\n self._hums = []\n\n def process_data(self, dataset):\n self.clear()\n for date, data in dataset:\n if 'datefrom' not in self._stats:\n self._stats['datefrom'] = date\n self._stats['dateto'] = date\n\n hourly_data = data.get('hourly', {})\n data_points = hourly_data.get('data', [])\n for data_point in data_points:\n if 'time' in data_point:\n dp_date = datetime.fromtimestamp(data_point['time']).date().isoformat()\n if dp_date == date:\n self._process_data_point(data_point)\n\n def _process_data_point(self, data):\n if 'temperature' in data:\n self._temps.append(float(data['temperature']))\n if 'humidity' in data:\n self._hums.append(float(data['humidity']))\n\n def get(self):\n if len(self._temps) > 0:\n tmin = min(x for x in self._temps)\n tmax = max(x for x in self._temps)\n tavg = statistics.mean(self._temps)\n tmed = statistics.median(self._temps)\n data_temp = {}\n data_temp['labels'] = ['Min', 'Avg', 'Med', 'Max']\n data_temp['data'] = [tmin, tavg, tmed, tmax]\n self._stats['temperature'] = data_temp\n if len(self._hums) > 0:\n hmin = min(x for x in self._hums)\n hmax = max(x for x in self._hums)\n havg = statistics.mean(self._hums)\n hmed = statistics.median(self._hums)\n data_hum = {}\n data_hum['labels'] = ['Min', 'Avg', 'Med', 'Max']\n data_hum['data'] = [hmin, havg, hmed, hmax]\n self._stats['humidity'] = data_hum\n return self._stats\n\n\nclass WeatherData:\n\n def __init__(self, forecastio_apikey, google_apikey):\n self.apikey = forecastio_apikey\n self.geo = Geocode(google_apikey)\n self.baseurl = r'https://api.forecast.io/forecast'\n\n def get_stats(self, search, datefrom='', dateto=''):\n dataset = []\n locations = self.geo.get(search)\n if len(locations) > 0:\n address, lat, lng = locations[0]\n data = {}\n data['query'] = search\n data['address'] = address\n data['lat'] = lat\n data['lng'] = lng\n raw_data = self._get_weather_raw(lat, lng, datefrom, dateto)\n stats = WeatherStats()\n stats.process_data(raw_data)\n data['stats'] = stats.get()\n dataset.append(data)\n return dataset\n\n def _get_weather_raw(self, lat, lng, datefrom='', dateto=''):\n dates = self._get_dates(datefrom, dateto)\n dataset = []\n for date in dates:\n url = self._make_url(lat, lng, date)\n r = requests.get(url)\n if r.status_code == 200:\n data = r.json()\n dataset.append((date, data))\n else:\n print(\"Bad status code\", r.status_code)\n 
return dataset\n\n def _get_dates(self, datefrom='', dateto=''):\n dates = []\n if datefrom == '':\n datefrom = datetime.utcnow()\n dateto = datefrom\n else:\n datefrom = datetime.strptime(datefrom, '%Y-%m-%d')\n dateto = datetime.strptime(dateto, '%Y-%m-%d')\n diff = dateto - datefrom\n for i in range(diff.days + 1):\n date = datefrom + timedelta(days=i)\n dates.append(date.date().isoformat())\n return dates\n\n def _make_url(self, lat, lng, date=''):\n url = r'{}/{}/{},{}'.format(self.baseurl, self.apikey, lat, lng)\n if date != '':\n url += r',{}T00:00:00-0000'.format(date)\n url += r'?units=si&exclude=currently,minutely,daily,alerts,flags'\n return url\n","sub_path":"weatherdata/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"197904014","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#Pygame\nfrom pygame import *\nfrom pygame.locals import *\nimport math\nimport db\n\t\t\n\n#Char\nclass Char:\n\t\n\tdef __init__(self, fenetre, x, y, couleur, nom, vitesse=4, relief=1):\n\t\t\"\"\"Où 'canvas' le nom du Canvas,\n'x' et 'y' les coordonnées du char,\nnom, un tuple sous la forme:nom = ('nom', x, y, couleur)\"\"\"\n\t\t#Le \"monde\" dans lequel évolue le char (fenêtre, canvas)\n\t\tself.fenetre = fenetre\n\t\t#Données qui seront initialisées plus tard (dans la fonction afficher)\n\t\tself.terrain = None\n\t\tself.Joueurs = None\n\t\t#Coordonnée du char\n\t\tself.char_x, self.char_x0 = x, x\n\t\tself.char_y, self.char_y0 = y, y\n\t\t#Coordonnée du canon\n\t\tself.alpha = math.pi/2\n\t\tself.canon_x = self.char_x + db.TILE//2 + 0.8*db.TILE*math.sin(0)\n\t\tself.canon_y = self.char_y + db.TILE//2 - 0.8*db.TILE*math.cos(0)\n\t\t#Pour la mine\n\t\tself.mine = None\n\t\tself.mine_x = 0\n\t\tself.mine_y = 0\n\t\tself.stock_mine = 1\n\t\tself.timer = 5000\n\t\t#Pour les munitions\n\t\tself.munition = []\n\t\t#Pour le mouvement\n\t\tself.dir = [False, False, False, False]\n\t\tself.vitesse = vitesse\n\t\t#Autre caractéristique ( couleur du char, nom, état (mort ou pas?), relief (taille des bordures) )\n\t\tself.couleur = couleur\n\t\tself.nom = nom\n\t\tself.mort = False\n\t\tself.relief = relief\n\t\n\tdef afficher(self, fenetre):\n\t\t#\n\t\tself.canon_x = self.char_x + db.TILE//2 + 0.8*db.TILE*math.sin(0)\n\t\tself.canon_y = self.char_y + db.TILE//2 - 0.8*db.TILE*math.cos(0)\n\t\t\n\t\t#Le char\n\t\tdraw.rect(fenetre, self.couleur, (self.char_x, self.char_y, db.TILE, db.TILE) )\n\t\t\t#Roues\n\t\tdraw.rect(fenetre, db.GREY, (self.char_x, self.char_y, 6, 32) )\n\t\tdraw.rect(fenetre, db.GREY, (self.char_x+26, self.char_y, 6, 32) )\n\t\t\t#Contour\n\t\tdraw.rect(fenetre, db.BLACK, (self.char_x, self.char_y, 32, 32), 1 )\n\t\t#Le canon\n\t\tdraw.line(fenetre, db.BLACK, (self.char_x+16, self.char_y+16), (self.canon_x, self.canon_y), 5 )\n\t\t#Le pivot\n\t\tdraw.ellipse(fenetre, db.BLACK, (self.char_x+6, self.char_y+6, 20, 20) )\n\t\tdraw.ellipse(fenetre, db.YELLOW, (self.char_x+12, self.char_y+12, 8, 8) )\n\t\n\tdef change_dir(self, event):\n\t\t#Active la direction (pour le rang, voir clavier numérique)\n\t\tif (event == 'Down'):\n\t\t\tself.dir[0] = True\n\t\telif (event == 'Left'):\n\t\t\tself.dir[1] = True\n\t\telif (event == 'Up'):\n\t\t\tself.dir[2] = True\n\t\telif (event == 'Right'):\n\t\t\tself.dir[3] = True\n\t\t\t\n\tdef stop_dir(self, event):\n\t\t#Désactive la direction (pour le rang, voir clavier numérique)\n\t\tif (event == 'Down'):\n\t\t\tself.dir[0] = False\n\t\tif (event == 'Left'):\n\t\t\tself.dir[1] = False\n\t\tif (event == 'Up'):\n\t\t\tself.dir[2] = False\n\t\tif (event == 'Right'):\n\t\t\tself.dir[3] = False\n\t\t\t\n\tdef mouvement_char(self):\n\t\t#Change la direction\n\t\tif self.dir[0]:\n\t\t\tself.char_y += self.vitesse\n\t\tif self.dir[1]:\n\t\t\tself.char_x -= self.vitesse\n\t\tif self.dir[2]:\n\t\t\tself.char_y -= self.vitesse\n\t\tif self.dir[3]:\n\t\t\tself.char_x += self.vitesse\n\t\t\n\n#On affiche la fenêtre\n##fenetre = display.set_mode( (0, 0), FULLSCREEN )\nlongueur, largeur = 32*db.TILE, 20*db.TILE\nfenetre = display.set_mode( (longueur, largeur) )\ndisplay.set_caption(\"Char\")\n\ninit()\n\n#Boucle principale\ncontinuer = 1\nJoueur1 = Char( fenetre, 32, 32, db.YELLOW, ('Joueur', 60, 20, 'White') )\nwhile continuer:\n time.Clock().tick(30)\n\n draw.rect(fenetre, 
db.NAVAJOWHITE, (0, 0, longueur, largeur) )\n Joueur1.afficher(fenetre)\n #fond = pygame.image.load('background.png').convert()\n #fenetre.blit(fond, (0, 0))\n display.flip()\n \n #Détection des touches / clicks\n for touche in event.get():\n if touche.type == QUIT:\n continuer = 0\n \n elif touche.type == KEYDOWN:\n \"\"\"Touches clavier\"\"\"\n #=========Tileset=========#\n if (touche.key == K_z) or (touche.key == K_a):\n Joueur1.change_dir('Up')\n elif (touche.key == K_q) or (touche.key == K_w):\n Joueur1.change_dir('Left')\n elif (touche.key == K_s):\n Joueur1.change_dir('Down')\n elif (touche.key == K_d):\n Joueur1.change_dir('Right')\n if (touche.key == K_ESCAPE):\n continuer = 0\n if touche.type == KEYUP:\n \"\"\"Touches clavier\"\"\"\n #=========Tileset=========#\n if (touche.key == K_z) or (touche.key == K_a):\n Joueur1.stop_dir('Up')\n elif (touche.key == K_q) or (touche.key == K_w):\n Joueur1.stop_dir('Left')\n elif (touche.key == K_s):\n Joueur1.stop_dir('Down')\n elif (touche.key == K_d):\n Joueur1.stop_dir('Right')\n Joueur1.mouvement_char()\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"229161252","text":"import numpy as np\r\nimport findspark\r\nfindspark.init()\r\n\r\nfindspark.find()\r\nimport pyspark\r\nfindspark.find()\r\n\r\nfrom pyspark.mllib.clustering import KMeans, GaussianMixture, GaussianMixtureModel\r\nfrom pyspark import SparkConf, SparkContext\r\n\r\n\r\ndef getTrueValue(line):\r\n y = np.array([float(x) for x in line.split(',')])\r\n return y[-1] # return the last element (at index -1)\r\n\r\n\r\ndef parseLine(line):\r\n y = np.array([float(x) for x in line.split(',')])\r\n return y[0:-1] # drop the last element (at index -1)\r\n\r\n\r\ndef closestCluster(p, centers):\r\n bestIndex = 0\r\n minDist = float(\"+inf\") # minimum distance\r\n for i in range(len(centers)):\r\n tempDist = np.sum((p - centers[i]) ** 2) # **: exponentiation\r\n if tempDist < minDist:\r\n minDist = tempDist\r\n bestIndex = i\r\n return bestIndex\r\n\r\n\r\ndef main():\r\n sc = SparkContext(master=\"local\", appName=\"K-Means\")\r\n try:\r\n # csv = sc.textFile(sys.argv[1]) if input via cmd\r\n csv = sc.textFile(\"kmeans_data.csv\")\r\n except IOError:\r\n print('No such file')\r\n exit(1)\r\n\r\n parsedData = csv.map(parseLine)\r\n trueValue = csv.map(getTrueValue)\r\n # print for debugging\r\n print(\"number of features: \", len(parsedData.collect()[0]))\r\n # Build the model (cluster the data), K = 2\r\n clusters = KMeans.train(\r\n parsedData, 2, maxIterations=50, initializationMode=\"random\")\r\n g_clusters = GaussianMixture.train(parsedData, 2)\r\n centers = clusters.clusterCenters\r\n # g_centers = g_clusters.clusterCenters\r\n print(\"Final k centers:\", centers) # print for debugging purpose\r\n # print(\"Final k centers for expectation maximization:\", g_centers)\r\n\r\n # for each data point, generate its cluster label:\r\n predictedLabels = parsedData.map(\r\n lambda point: closestCluster(point, centers))\r\n # g_predictedLabels = parsedData.map(lambda point: closestCluster(point, g_centers))\r\n g_predictedLabels = g_clusters.predict(parsedData)\r\n results = predictedLabels.collect()\r\n g_results = g_predictedLabels.collect()\r\n true = trueValue.collect()\r\n accuracy_count = 0 # count how many data points having correct labels\r\n # output in results.txt: i-th row: true label, predicted label for i-th data point:\r\n g_accuracy_count = 0\r\n with open(\"results.txt\", \"w\") as f:\r\n f.write(\"true\\tpredicted\\n\")\r\n for i in range(len(results)):\r\n f.write(str(true[i]) + \"\\t\" + str(results[i]) + \"\\n\")\r\n if int(true[i]) == int(results[i]):\r\n accuracy_count += 1\r\n if int(true[i]) == int(g_results[i]):\r\n g_accuracy_count += 1\r\n\r\n accuracy = accuracy_count / len(results)\r\n g_accuracy = g_accuracy_count / len(results)\r\n if accuracy < 0.5: # our predicted label IDs might be opposite\r\n accuracy = 1 - accuracy\r\n \r\n if g_accuracy < 0.5:\r\n g_accuracy = 1 - g_accuracy\r\n print(\"accuracy is :\", accuracy)\r\n print(\"EM accuracy is : \", g_accuracy)\r\n sc.stop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"km.py","file_name":"km.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"154287270","text":"\"\"\"\n3431. 준환이의 운동관리\n\n\n최근 경도비만 판정을 받은 준환이는 적절한 몸을 유지하기 위하여 1주일에 L분 이상 U분 이하의 운동을 하여야 한다.\n\n준환이는 이번 주에 X분만큼 운동을 하였다.\n\n당신은 준환이가 제한되어 있는 시간을 넘은 운동을 한 것인지, 그것이 아니라면 몇 분 더 운동을 해야 제한을 맞출 수 있는지 출력하는 프로그램을 작성해야 한다.\n\n\n[입력]\n\n첫 번째 줄에 테스트 케이스의 수 T가 주어진다.\n\n각 테스트 케이스의 첫 번째 줄에는 세 정수 L, U, X(0≤ L ≤ U ≤ 107, 0 ≤ X ≤ 107)가 공백으로 구분되어 주어진다.\n\n\n[출력]\n\n각 테스트 케이스마다 I가 필요한 양보다 더 많은 운동을 하고 있다면 -1을 출력하고, 아니라면 추가로 몇 분을 더 운동해야 하는지 출력한다.\n \n\"\"\"\n\ndef checkTime(L,U,X):\n if U < X :\n return -1\n elif L < X :\n return 0\n else:\n return L - X\n\n\nT = int(input())\n\nfor t in range(1,T+1):\n L,U,X = map(int,input().split())\n res = checkTime(L,U,X)\n print(f\"#{t} {res}\")","sub_path":"OnlineJudge/SWExpertAcademy/Problem/D3/3431.py","file_name":"3431.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"194046196","text":"import urllib.request\nimport requests\nimport unittest\nimport json\nimport utils\nimport hmac\nimport hashlib\nimport string\nimport xlrd\nimport data\nimport con_login\nfrom asyncio.tasks import sleep\nimport read\nimport random\n\ndef getHtml(url):\n page =urllib.request.urlopen(url)\n html = \"D:\\/pyrequest\\/common\"\n for line in page.readlines():\n html = html+str(line)+\"\\n\"\n return html\nclass v_2_3(unittest.TestCase):\n# def __init__(self,key,value):\n# unittest.TestCase.__init__(self,key,value)\n# self.key = key\n# self.value = value\n\n def setUp(self):\n self.url = \"http://119.27.167.20:8083/mooka/defaultinfo.json\"\n self.url2 = \"http://119.27.167.20:8083/member/login.json\"\n self.url3 = \"http://119.27.167.20:8083/dynamic/recentaward.json\"\n self.url4=\"http://119.27.167.20:8083/member/expinfo.json\"\n self.url5=\"http://119.27.167.20:8083/task/sign.json\"\n self.url6=\"http://119.27.167.20:8083/reward/ranks.json\"\n self.url7=\"http://119.27.167.20:8083/member/addinvitedcode.json\"\n self.url8=\"http://119.27.167.20:8083/activity/actrank.json\"\n def login_common(self):\n parm={'telephone':'15882481462','captcha':'123456'}\n r = requests.post(self.url2,data=parm)\n token=r.json()['data']['token']\n \n return token\n def commom_header(self):\n \n \n headers={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n 'Accept': '*/*',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Accept-Encoding': 'gzip, deflate',\n 'Referer': 'http://119.27.167.20/',\n 'token':v_2_3.login_common(self)\n }\n return headers\n def test_awardgift(self):\n parms = {'type':'2'}\n \n r = requests.get(self.url,params=parms,headers=v_2_3.commom_header(self))\n msg=r.json()['msg']\n print('积分列表')\n \n \n self.assertEqual(msg,'success', ('Fail',r.json))\n \n \n \n def test_awardgift2(self):\n parms = {'type':'1'}\n \n r = requests.get(self.url,params=parms,headers=v_2_3.commom_header(self))\n msg=r.json()['msg']\n print('药丸列表')\n \n \n self.assertEqual(msg,'success', ('Fail',r.json()))\n \n def test_recentawardgiftlist(self):\n params={'id':data.test_get_list}\n r= requests.get(self.url3, params=params)\n msg=r.json()['msg']\n print('单个圈圈最近打赏列表')\n \n self.assertEqual(msg,'success',('Fail',r.json()))\n \n def test_expinfo(self):\n r= requests.get(self.url4,headers=con_login.common_header(self))\n \n print('打卡及经验显示') \n \n self.assertEqual(r.json()['msg'],'success',('Fail',r.json()))\n \n \n def test_sign(self):\n r=requests.post(self.url5,headers=con_login.common_header(self))\n print('签到:')\n try:\n tag=r.json()['data']\n \n msg=r.json()['msg']\n if tag == True:\n print('已经签到过了')\n \n if tag !=True:\n print('签到成功')\n \n except:\n print('签到失败',r.json())\n def test_rewardranks(self):\n r=requests.get(self.url6, headers=con_login.common_header(self)) \n msg=r.json()['msg']\n print('奖励详情')\n \n self.assertEqual(msg,'success',('Fail',r.json()))\n \n def test_addinvitedcode(self):\n sheetName = 'Sheet1'\n# print(read.get_excel_data('/pyrequest/source/testinvitedcode.xlsx',sheetName))\n list=[108811,None,122342]\n value = random.sample(list,1)\n \n params={' invited_code':value}\n \n r= requests.post(self.url7, params=params, headers=con_login.common_header(self))\n \n try:\n if r.json()['msg']=='已经填写过邀请码了,不能重复填写':\n print('已经填写过邀请码了,不能重复填写,pass')\n if r.json()['code']==100020:\n print('邀请码不能为空,pass')\n if r.json()['msg']=='邀请码错误啦!怎么肥四' :\n print('邀请码错误,pass')\n if r.json()['code']==200:\n 
print('添加成功')\n except:\n print('invite系统异常',r.json()) \n \n# r= requests.get(url, params)\n def test_getrankact(self):\n r= requests.get(self.url8)\n \n self.assertEqual(r.json()['msg'],'success',('Fail',r.json()))\n \n \n \n \n \n \nif __name__ == '__main__':\n unittest.main()\n ","sub_path":"interface/test_v2_3.py","file_name":"test_v2_3.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"110955929","text":"from app import app\nfrom flask import render_template, request, redirect, url_for\nfrom app.models import *\nimport pdb\n\n@app.route('/', methods=['GET'])\n@app.route('/index', methods=['GET'])\ndef index():\n\tuser = {'name': 'Jason Brooks'}\n\tfriends = [{'name': 'Shafeeq Ibraheem'}, {'name': 'Peter Salovey'}]\n\treturn render_template('index.html', title=\"Homepage\", user=user, friends=friends, posts=reversed(Post.query.all()))\n\n@app.route('/new-post', methods=['GET', 'POST'])\ndef newPost():\n\tif request.method == 'GET':\n\t\treturn render_template('new-post.html', title=\"New Post\")\n\ttitle = request.form['post-title']\n\tcontent = request.form['post-content']\n\tauthor_email = request.form['post-author']\n\tauthor = User.query.filter(User.email == author_email).first()\n\tpost = Post(title=title, body=content, author=author)\n\tdb.session.add(post)\n\tdb.session.commit()\n\tpost = {'title': title, 'content': content}\n\treturn redirect(url_for('index'))","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"140959511","text":"# main.py\nfrom django.conf.urls import include, url\nfrom blog.views import *\nurlpatterns = [\n url(r'^hello/$', hello),\n url(r'^blogList/$', blogList),\n url(r'^bookList/$', getBookList),\n url(r'^ajaxBookInfo/$', ajaxBookInfo),\n url(r'^findBookHero/$', findBookHero),\n]","sub_path":"myweb/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"328719800","text":"import websockets\nimport asyncio\nimport threading \n\ndef worker(loop, port, callback):\n asyncio.set_event_loop(loop)\n # https://pypi.org/project/websockets/\n async def echo(websocket, path):\n async for message in websocket:\n callback(message)\n #print(\"wsserve.py revceived message\", message)\n # send it back!\n await websocket.send(message)\n print(\"wsserve.py running websocket server\")\n\n loop.run_until_complete(\n websockets.serve(echo, 'localhost', port)\n )\n loop.run_forever()\n \ndef run_websocket(callback, port=8765):\n \"\"\"\n starts a websocket server in a separate thread\n on the specified port (or 8765) by default\n \"\"\"\n # https://stackoverflow.com/questions/48725890/runtimeerror-there-is-no-current-event-loop-in-thread-thread-1-multithreadi\n loop = asyncio.new_event_loop()\n p = threading.Thread(target=worker, args=(loop,port,callback))\n p.start()\n\n\n","sub_path":"ddsp-workshop/ws_server.py","file_name":"ws_server.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"36032066","text":"# Problem 1, Multiples of 3 and 5\r\n# 2017-07-30\r\n# Answer : 233168 \r\n\r\ndef solve_p1():\r\n sum = 0\r\n for i in range(1, 1000, 1):\r\n if (i % 3 == 0) or (i % 5 == 0):\r\n \tsum = sum + i\r\n print(\"result :\", sum)\r\n\r\nsolve_p1()\r\n","sub_path":"Problem_1.py","file_name":"Problem_1.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"470684671","text":"from dival import TaskTable\nfrom dival.measure import PSNR, SSIM\n\nfrom dliplib.utils import Params\nfrom dliplib.utils.data.datasets import CachedDataset\nfrom dliplib.utils.helper import select_hyper_best_parameters, load_standard_dataset\nfrom dliplib.utils.reports import save_results_table\nfrom dliplib.utils.weights import get_weights_path\nfrom dliplib.reconstructors.iradonmap import IRadonMapReconstructor\n\n\n# load data\ndataset = load_standard_dataset('lodopab', ordered=True)\nray_trafo = dataset.ray_trafo\n\nglobal_results = []\nreconstructor = None\ntask_table = None\n\nfull_size_epochs = 15\nsizes = [0.0001, 0.001, 0.01, 0.1, 1.00]\n#sizes = [0.0001]\n#sizes = [0.001]\n#sizes = [0.01]\n#sizes = [0.1]\n\n\nfor size_part in sizes:\n del(task_table)\n del(reconstructor)\n\n cached_dataset = CachedDataset(dataset,\n space=(ray_trafo.range, ray_trafo.domain),\n cache_files={'train': [None, None],\n 'validation': [None, None]},\n size_part=size_part)\n\n test_data = dataset.get_data_pairs('validation',\n cached_dataset.validation_len)\n print('validation size: %d' % len(test_data))\n\n reconstructor = IRadonMapReconstructor(\n ray_trafo=ray_trafo,\n log_dir='lodopab_iradonmap/' + str(size_part),\n save_best_learned_params_path=get_weights_path(\n 'lodopab_iradonmap_{}'.format(size_part)))\n\n epochs = min(10 * full_size_epochs, int(1./size_part * full_size_epochs))\n\n # create a Dival task table and run it\n task_table = TaskTable()\n task_table.append(reconstructor=reconstructor, measures=[PSNR, SSIM],\n test_data=test_data, dataset=cached_dataset,\n hyper_param_choices={'scales': [5],\n 'skip_channels': [4],\n 'batch_size': [2],\n 'epochs': [epochs],\n 'fully_learned': [True],\n 'lr': [0.01],\n 'use_sigmoid': [False]})\n results = task_table.run()\n\n # save report\n save_results_table(results, 'lodopab_iradonmap_{}'.format(size_part))\n\n # select best parameters and save them\n best_choice, best_error = select_hyper_best_parameters(results)\n params = Params(best_choice)\n params.save('lodopab_iradonmap_{}'.format(size_part))\n","sub_path":"dliplib/train/lodopab_iradonmap.py","file_name":"lodopab_iradonmap.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"129986268","text":"# !/usr/bin/python3\n# coding: utf-8\n\n\"\"\"Removes ending -..... from downloaded YouTube videos\"\"\"\n\nimport argparse\nimport os\nimport shutil\n\nimport colorama\nfrom colorama import Fore, Style\nfrom hal.files.models import system\nfrom hal.files.models.files import Document\nfrom hal.streams.user import UserInput\n\nCONTROL_CHAR = '-'\nCONTROL_CHAR_INDEX = -12\nUSER = UserInput()\n\ncolorama.init()\n\n\ndef create_args():\n \"\"\"\n :return: ArgumentParser\n Parser that handles cmd arguments.\n \"\"\"\n\n parser = argparse.ArgumentParser(usage='-f '\n '-h for full usage')\n parser.add_argument('-f', dest='filesystem',\n help='file to rename or folder to search',\n required=True)\n return parser\n\n\ndef parse_args(parser):\n \"\"\"\n :param parser: ArgumentParser\n Object that holds cmd arguments.\n :return: tuple\n Values of arguments.\n \"\"\"\n\n args = parser.parse_args()\n file_system = str(args.filesystem)\n assert os.path.exists(file_system)\n return file_system\n\n\ndef is_downloaded_from_youtube(file):\n doc = Document(file)\n file_name = doc.name\n file_extension = doc.extension\n\n try:\n control_char_matches = (file_name[CONTROL_CHAR_INDEX] == CONTROL_CHAR)\n previous_char_matches = (file_name[CONTROL_CHAR_INDEX - 1] != ' ')\n next_char_matches = (file_name[CONTROL_CHAR_INDEX + 1] != ' ')\n next_chars_matches = ' ' not in file_name[CONTROL_CHAR_INDEX:-1]\n\n name_matches = (control_char_matches and\n previous_char_matches and\n next_char_matches and\n next_chars_matches)\n\n extension_matches = (file_extension in ['.mp3', '.webm'])\n\n return name_matches and extension_matches\n except:\n return False\n\n\ndef find_files(folder):\n files = system.list_content(folder, recurse=True) # find files\n return files\n\n\ndef rename_file(file):\n if is_downloaded_from_youtube(file):\n doc = Document(file)\n file_name = doc.name\n file_parent_folder = doc.get_path_name()[0]\n file_extension = doc.extension\n\n new_name = file_name[:CONTROL_CHAR_INDEX] # find new name\n new_path = file_parent_folder + new_name + file_extension # full path\n\n question = 'Do you want to rename ' + \\\n Fore.GREEN + Style.BRIGHT + file_name + Style.RESET_ALL + \\\n ' to ' + Fore.BLUE + Style.BRIGHT + new_name + \\\n Style.RESET_ALL + ' ?' + Fore.RED + Style.BRIGHT\n\n if USER.get_yes_no(question):\n shutil.move(file, new_path) # rename\n\n print(Style.RESET_ALL)\n\n\ndef rename_folder(folder):\n files = find_files(folder)\n for file in files:\n rename_file(file)\n\n\ndef main():\n file_system = parse_args(create_args())\n\n if os.path.isfile(file_system):\n rename_file(file_system)\n elif os.path.isdir(file_system):\n rename_folder(file_system)\n else:\n print('Not a file, nor a folder')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"youtube/youtube-audio-fix.py","file_name":"youtube-audio-fix.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"497195607","text":"import plotly.graph_objects as go\r\nimport plotly.express as px\r\nfrom plotly.subplots import make_subplots\r\n\r\ndef graphplot(l1,l2,f1):\r\n fig = go.Figure(\r\n data=[go.Bar(x=l1, y=l2)],\r\n layout=dict(title=dict(text=f1))\r\n )\r\n fig.write_html(\"./static/graph/\"+f1+'.html',auto_open=False)\r\n\r\ndef Scatterplot(l1,l2,f1):\r\n fig = go.Figure(\r\n data=[go.Scatter(x=l1, y=l2)],\r\n layout=dict(title=dict(text=f1))\r\n )\r\n fig.write_html(\"./static/graph/\"+f1+'.html',auto_open=False)\r\n\r\ndef pointplot(l1,l2,f1):\r\n fig = go.Figure(\r\n data=[go.Scatter(x=l1, y=l2,mode='markers')],\r\n layout=dict(title=dict(text=f1))\r\n )\r\n fig.write_html(\"./static/graph/\"+f1+'.html',auto_open=False)\r\n\r\ndef plot3d(l1,l2,l3,f1):\r\n fig = go.Figure(data=[go.Scatter3d(x=l1, y=l2,z=l3,mode=\"markers\",marker=dict(color=3,size=5,colorscale='Viridis',opacity=0.8,))],layout=dict(title=dict(text=f1)))\r\n fig.write_html(\"./static/graph/\"+f1+'.html',auto_open=False)\r\n","sub_path":"details/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"558747286","text":"import logging\n\nfrom django.conf import settings\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom django.core.management.base import BaseCommand\nfrom django_apscheduler.jobstores import DjangoJobStore\nfrom django_apscheduler.models import DjangoJobExecution\nfrom main.models import Subscriber, Product\nfrom django.utils import timezone\nfrom datetime import timedelta\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render\nfrom django.template import loader\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef my_job():\n week_ago = timezone.now() - timedelta(days=7)\n products_of_the_week = Product.objects.filter(created_on__gt=week_ago)\n if not products_of_the_week:\n return\n content = loader.render_to_string(\"./week_update.html\",{\"products\":products_of_the_week}, None)\n for subscribe in Subscriber.objects.all():\n send_mail(\"Товары недели\", content, None, [subscribe.user.email,]) \n\n\ndef delete_old_job_executions(max_age=604_800):\n \"\"\"This job deletes all apscheduler job executions older than `max_age` from the database.\"\"\"\n DjangoJobExecution.objects.delete_old_job_executions(max_age)\n\n\nclass Command(BaseCommand):\n help = \"Runs apscheduler.\"\n\n def handle(self, *args, **options):\n scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)\n scheduler.add_jobstore(DjangoJobStore(), \"default\")\n \n scheduler.add_job(\n delete_old_job_executions,\n trigger=CronTrigger(\n day_of_week=\"mon\", hour=\"00\", minute=\"00\"\n ), # Midnight on Monday, before start of the next work week.\n id=\"delete_old_job_executions\",\n max_instances=1,\n replace_existing=True,\n )\n logger.info(\n \"Added weekly job: 'delete_old_job_executions'.\"\n )\n\n try:\n logger.info(\"Starting scheduler...\")\n scheduler.start()\n except KeyboardInterrupt:\n logger.info(\"Stopping scheduler...\")\n scheduler.shutdown()\n logger.info(\"Scheduler shut down successfully!\")","sub_path":"main/management/commands/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"428198580","text":"from turtle import Turtle, Screen\nimport random\n\nis_race_on = False\n\nmy_screen = Screen()\nmy_screen.bgcolor(\"black\")\nmy_screen.setup(width=500, height=400)\n\nuser_bet = my_screen.textinput(title=\"Make your bet\", prompt=\"Which turtle will win the race? Enter a color: \")\n\ncolors = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\ny_positions = [-70, -40, -10, 20, 50, 80]\nall_turtles = []\n\nfor turtle_index in range(0, 6):\n new_turtle = Turtle(shape=\"turtle\")\n new_turtle.penup()\n new_turtle.color(colors[turtle_index])\n new_turtle.goto(x=-230, y=y_positions[turtle_index])\n all_turtles.append(new_turtle)\n\nif user_bet:\n is_race_on = True\n\n\ndef ending_line():\n tim = Turtle()\n tim.hideturtle()\n tim.color(\"white\")\n tim.penup()\n tim.goto(x=230, y=0)\n tim.pendown()\n for number_to_multi in range(1, 3):\n tim.right(90*number_to_multi)\n tim.forward(100*number_to_multi)\n\n\nending_line()\nwhile is_race_on:\n for turtle in all_turtles:\n # 230 is 250 - half the width of the turtle.\n if turtle.xcor() > 230:\n is_race_on = False\n winning_color = turtle.pencolor()\n if winning_color == user_bet:\n print(f\"You've won! The {winning_color} turtle is the winner!\")\n else:\n print(f\"You've lost! The {winning_color} turtle is the winner!\")\n\n # Make each turtle move a random amount.\n rand_distance = random.randint(0, 10)\n turtle.forward(rand_distance)\n\nmy_screen.exitonclick()\n","sub_path":"turtle_graphics/turtle_race_game.py","file_name":"turtle_race_game.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"124967157","text":"# basic cog\nimport discord\nfrom discord.ext import commands\n\n\n\"\"\"A simple cog example with simple commands. Some with the use of events in cogs.\nRewrite docs:\nhttp://dischttp://discordpy.readthedocs.io/en/rewrite/\nYou could also create your own custom checks. Check out:\nhttps://github.com/Rapptz/discord.py/blob/master/discord/ext/commands/core.py#L689\nFor a list of events:\nhttp://discordpy.readthedocs.io/en/rewrite/api.html#event-reference\nhttp://discordpy.readthedocs.io/en/rewrite/ext/commands/api.html#event-reference\n\"\"\"\n\n\nclass Simple:\n \"\"\"Some simple commands for the rewrite cog\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='repeat', aliases=['copy', 'mimic'])\n async def do_repeat(self, ctx, *, our_input: str):\n \"\"\"A simple command which repeats our input.\n In rewrite Context is automatically passed to our commands as the first argument after self.\"\"\"\n\n await ctx.send(our_input)\n\n @commands.command(name='add', aliases=['plus'])\n @commands.guild_only()\n async def do_addition(self, ctx, first: int, second: int):\n \"\"\"A simple command which does addition on two integer values.\"\"\"\n\n total = first + second\n await ctx.send(f'The sum of **{first}** and **{second}** is **{total}**')\n\n @commands.command(name='embeds')\n @commands.guild_only()\n async def example_embed(self, ctx):\n \"\"\"A simple command which showcases the use of embeds.\n Try changing colors and names etc.\"\"\"\n\n embed = discord.Embed(title='Example Embed',\n description='Showcasing the use of Embeds...\\nSee the visualizer for more info.',\n colour=0x98FB98)\n embed.set_author(name='GeorgeCY',\n url='https://gist.github.com/MysterialPy/public',\n icon_url='https://www.w3schools.com/w3css/w3css_images.asp')\n embed.set_image(url='https://www.google.no/imgres?imgurl=https%3A%2F%2Fthemeawesome.com%2Fthemes%2Ftotalpress%2Fwp-content%2Fuploads%2F2012%2F12%2Funicorn-wallpaper.jpg&imgrefurl=https%3A%2F%2Fthemeawesome.com%2Fthemes%2Ftotalpress%2Fpost-format-image%2F&docid=a0UGZeBM_0JzLM&tbnid=ZPIb9IX5WOejGM%3A&vet=10ahUKEwjGy8-56dDbAhXBKJoKHVccA_0QMwhnKA4wDg..i&w=1600&h=1200&bih=974&biw=1920&q=image&ved=0ahUKEwjGy8-56dDbAhXBKJoKHVccA_0QMwhnKA4wDg&iact=mrc&uact=8')\n \n embed.set_footer(text='Made in Python with discord.py@rewrite', icon_url='http://i.imgur.com/5BFecvA.png')\n\n await ctx.send(content='**A simple Embed for discord.py@rewrite in cogs.**', embed=embed)\n\n async def on_member_ban(self, guild, user):\n \"\"\"Event Listener which is called when a user is banned from the guild.\n This will print some stuff into the console\n \"\"\"\n\n print(f'{user.name}-{user.id} was banned from {guild.name}-{guild.id}')\n\n# The setup fucntion below is neccesarry. Remember we give bot.add_cog() the name of the class in this case Simple.\n# When we load the cog, we use the name of the file.\n# Remember to have the bot parameter, and run the bot.add_cog.\n# You could have the cog inside the main file just with bot.add_cog(ClassName(bot)).\ndef setup(bot):\n bot.add_cog(Simple(bot))\n","sub_path":"Rewrite/using_cogs/cogs/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"354967179","text":"###\n# Copyright (c) 2021, Chase Phelps\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions, and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions, and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author of this software nor the name of\n# contributors to this software may be used to endorse or promote products\n# derived from this software without specific prior written consent.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n###\n\nfrom supybot import conf, registry\ntry:\n from supybot.i18n import PluginInternationalization\n _ = PluginInternationalization('Dienste')\nexcept:\n # Placeholder that allows to run the plugin on a bot\n # without the i18n module\n _ = lambda x: x\n\n\ndef configure(advanced):\n # This will be called by supybot to configure this module. advanced is\n # a bool that specifies whether the user identified themself as an advanced\n # user or not. You should effect your configuration by manipulating the\n # registry as appropriate.\n from supybot.questions import expect, anything, something, yn\n conf.registerPlugin('Dienste', True)\n\n\nDienste = conf.registerPlugin('Dienste')\n# This is where your configuration variables (if any) should go. 
For example:\n# conf.registerGlobalValue(Dienste, 'someConfigVariableName',\n# registry.Boolean(False, _(\"\"\"Help for someConfigVariableName.\"\"\")))\n#alrighty then, let's get a file list\nimport os, re\ntxtdir = '/'.join(os.path.realpath(__file__).split('/')[:-1])+'/'\n### get our api keys\nwith open(txtdir+'apikeys','r') as infile:\n for line in infile.readlines():\n line=line.strip().split('=')\n conf.registerGlobalValue(Dienste, line[0], registry.String(line[1],\n_(\"\"\"API key.\"\"\")))\ntxtdir+='txts'\nfilesavail = {}\nfilenames = os.listdir(txtdir)\nfor filename in filenames:\n if not os.path.isfile(txtdir+'/'+filename):\n continue\n fsize = os.path.getsize(txtdir+'/'+filename)\n funit='B'\n if fsize>=1073742000:\n funit = 'GiB'\n fsize/=1073742000\n elif fsize>=1048576:\n funit = 'MiB'\n fsize/=1048576\n elif fsize>=1024:\n funit = 'KiB'\n fsize/=1024\n fsize=round(fsize,2)\n filesavail[filename] = str(fsize)+funit\n# make the string representation of the dict a space separated list\nfilesavail = re.sub(',','',re.sub(':','',re.sub('\\'','',str(filesavail)[1:-1])))\n# the file list with filenames followed by sizes\nconf.registerGlobalValue(Dienste, 'txtdir', registry.String(txtdir,\n_(\"\"\"Base directory for txt files.\"\"\")))\nconf.registerGlobalValue(Dienste, 'filesavail', registry.SpaceSeparatedListOfStrings(filesavail, _(\"\"\"Available files.\"\"\")))\n\n# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"584424934","text":"import cv2\nimport numpy as np\nfrom psd_tools import PSDImage\n\nfrom imageio import imread, imwrite\n\n\n\n# psd1 = PSDImage.load(\"SIFT.psd\")\n# psd1.as_PIL().save(\"psd_image_to_detect1.png\")\n#\n# img = imread(\"psd_image_to_detect1.png\")\n# img = np.rot90(img, 1, (0, 1))\n# imwrite(\"psd_image_to_detect2.png\", img)\n\n\npsd_img_1 = cv2.imread('similar/img/7_Template.jpg', cv2.IMREAD_GRAYSCALE)\npsd_img_2 = cv2.imread('similar/img/7_Verify.jpg', cv2.IMREAD_GRAYSCALE)\n\n\nsift = cv2.xfeatures2d.SIFT_create()\n\npsd_kp1, psd_des1 = sift.detectAndCompute(psd_img_1, None)\npsd_kp2, psd_des2 = sift.detectAndCompute(psd_img_2, None)\n\n# 4) Flann特征匹配\nFLANN_INDEX_KDTREE = 1\nindex_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\nsearch_params = dict(checks=50)\n\nflann = cv2.FlannBasedMatcher(index_params, search_params)\n\n# knn 应该是 对于每个psd_des1 的元素 找到 k个相似的psd_des2 元素 k为2是为了提高准确度\nmatches = flann.knnMatch(psd_des1, psd_des2, k=2)\ngoodMatch = []\nfor m, n in matches:\n # goodMatch是经过筛选的优质配对,如果2个配对中第一匹配的距离小于第二匹配的距离的1/2,基本可以说明这个第一配对是两幅图像中独特的,不重复的特征点,可以保留。\n if m.distance < 0.50*n.distance:\n goodMatch.append(m)\n# 增加一个维度\ngoodMatch = np.expand_dims(goodMatch, 1)\nprint(goodMatch.shape)\nprint(goodMatch[:20])\n\nimg_out = cv2.drawMatchesKnn(psd_img_1, psd_kp1, psd_img_2, psd_kp2, goodMatch[:30], None, flags=2)\n\ncv2.imshow('image', img_out)#展示图片\ncv2.waitKey(0)#等待按键按下\ncv2.destroyAllWindows()#清除所有窗口\n\n\n","sub_path":"cy_cv/cySIFT.py","file_name":"cySIFT.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"416911930","text":"import numpy as np\nimport scipy\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport math\n\n\ndef DTWDistance(s1, s2):\n DTW = {}\n\n for i in range(len(s1)):\n DTW[(i, -1)] = float('inf')\n for i in range(len(s2)):\n DTW[(-1, i)] = float('inf')\n DTW[(-1, -1)] = 0\n\n for i in range(len(s1)):\n for j in range(len(s2)):\n dist = (s1[i] - s2[j]) ** 2\n DTW[(i, j)] = dist + min(DTW[(i - 1, j)], DTW[(i, j - 1)], DTW[(i - 1, j - 1)])\n\n return math.sqrt(DTW[len(s1) - 1, len(s2) - 1])\n\n\nA, B, C, D, E, F, G, H = [], [], [], [], [], [], [], []\nfile_path = 'C:\\\\Users\\\\陈哥\\\\Desktop\\\\周期呼吸波形\\\\standard1.txt'\nwith open(file=file_path, mode='r+', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n A.append(float(line))\n\nfile_path = 'C:\\\\Users\\\\陈哥\\\\Desktop\\\\周期呼吸波形\\\\standard2.txt'\nwith open(file=file_path, mode='r+', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n B.append(float(line))\n\nfile_path = 'C:\\\\Users\\\\陈哥\\\\Desktop\\\\周期呼吸波形\\\\standard3.txt'\nwith open(file=file_path, mode='r+', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n C.append(float(line))\n\nfile_path = 'C:\\\\Users\\\\陈哥\\\\Desktop\\\\周期呼吸波形\\\\standard4.txt'\nwith open(file=file_path, mode='r+', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n D.append(float(line))\n\nfile_path = 'C:\\\\Users\\\\陈哥\\\\Desktop\\\\周期呼吸波形\\\\standard5.txt'\nwith open(file=file_path, mode='r+', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n E.append(float(line))\n\nfile_path = 'C:\\\\Users\\\\陈哥\\\\Desktop\\\\周期呼吸波形\\\\standard6.txt'\nwith open(file=file_path, mode='r+', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n F.append(float(line))\n\n# file_path = 'C:\\\\Users\\\\陈哥\\\\Desktop\\\\周期呼吸波形\\\\6.txt'\n# with open(file=file_path, mode='r+', encoding='utf-8') as f:\n# for line in f.readlines():\n# line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n# G.append(float(line))\n\nG.append(A)\nG.append(B)\nG.append(C)\nG.append(D)\nG.append(E)\nG.append(F)\n\n# for i in range (0,7):\n# H.append(DTWDistance(H[0],H[i]))\n# print(H)\nfor i in range(0, 6):\n for j in range(0, 6):\n H.append(DTWDistance(G[i], G[j]))\nprint(H)\n\nI = []\n\ntemp_sum = 0\nfor i in range(0, 6):\n temp_sum += H[i]\nI.append(temp_sum)\n\ntemp_sum = 0\nfor i in range(6, 12):\n temp_sum += H[i]\nI.append(temp_sum)\n\ntemp_sum = 0\nfor i in range(12, 18):\n temp_sum += H[i]\nI.append(temp_sum)\n\ntemp_sum = 0\nfor i in range(18, 24):\n temp_sum += H[i]\nI.append(temp_sum)\n\ntemp_sum = 0\nfor i in range(24, 30):\n temp_sum += H[i]\nI.append(temp_sum)\n\ntemp_sum = 0\nfor i in range(30, 36):\n temp_sum += H[i]\nI.append(temp_sum)\nprint(I)\n# A = X_smooth[signal.argrelextrema(X_smooth, np.less)]\n# D = signal.argrelextrema(X_smooth, np.less)[0]\n# X = np.array(X)\n# B = X[signal.argrelextrema(X, np.less)]\n# k=len(A)\n# for i in range(0,k-1):\n# if(A[i]<-20):\n# C.append(A[i])\n# E.append(D[i])\n# print(A[i])\n\n\n# for i in range(0,k-1):\n# if(B[i]<-20):\n# print(B[i])\n# print(X_smooth[signal.argrelextrema(X_smooth, np.less)])\n# print(signal.argrelextrema(X_smooth, np.less))\n# plt.subplot(3,1,3)\n# plt.plot(Z)\n\n# a=DTWDistance(X,Y)\n# b=DTWDistance(X,Z)\n# print(a)\n# print(b)\n# plt.subplot(2,1,1)\n# plt.plot(X)\n# 
plt.plot(signal.argrelextrema(X, np.less)[0],X[signal.argrelextrema(X, np.less)],'+', markersize=10)\n# plt.subplot(2,1,2)\n# plt.plot(X_smooth)\n# plt.plot(E,C,'+', markersize=10)\n# plt.show()\n","sub_path":"DTW.py","file_name":"DTW.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"501254514","text":"import tkinter as tk\r\nimport random\r\n\r\nwindow = tk.Tk()\r\nwindow.geometry(\"600x400\")\r\nwindow.config(bg=\"#808080\")\r\nwindow.resizable(width=False, height=False)\r\nwindow.title(\"Number Guessing Game\")\r\n\r\n\r\nTarget = 1\r\nRetries = 0\r\n\r\ndef updateResult(text):\r\n result.configure(text=text)\r\n\r\ndef new_game():\r\n guess_button.config(state=\"normal\")\r\n global Target, Retries\r\n Target = random.randint(0, 10)\r\n Retries = 0\r\n updateResult(text=\"Guess a number between 1 to 10\")\r\n\r\n\r\ndef play_game():\r\n global Retries\r\n\r\n choice = int(num_form.get())\r\n if choice != Target:\r\n Retries +=1\r\n result = \"Wrong guess\"\r\n\r\n if Target < choice:\r\n hint = \"The num lies between 0 and {}\".format(result)\r\n else:\r\n hint = \"The num lies between {} and 10\".format(choice)\r\n result += \"\\n\\nHint:\\n \" +hint\r\n\r\n else:\r\n result = \"Yay.....its crct after {} retries\".format(Retries)\r\n guess_button.configure(state=\"disabled\")\r\n result += \"\\n\" + \"Start new game\"\r\n\r\n updateResult(result)\r\n\r\nplay_button = tk.Button(window, text=\"Play Game\", font=(\"Arial\", 10), fg=\"Black\", bg=\"#29c70a\", command=new_game)\r\n\r\nguess_button = tk.Button(window, text=\"Guess\", font=(\"Arial\", 14), fg=\"White\", bg=\"#b82741\", command=play_game)\r\n\r\n\r\nexit_button = tk.Button(window, text=\"Exit\", font=(\"Arial\", 14), fg=\"White\", bg=\"#b82741\", command=exit)\r\nexit_button.place(x=340, y=320)\r\n\r\ntitle = tk.Label(window, text=\"Guessing Number\", font=(\"Arial\", 24), fg=\"#fffcbd\", bg=\"#808080\")\r\nresult = tk.Label(window, text=\"click to play\", font=(\"Arial\", 14), fg=\"White\", bg=\"#808080\", justify=tk.LEFT)\r\ntitle.place(x=170, y=50)\r\nresult.place(x=260, y=125)\r\n\r\nguessed_num = tk.StringVar()\r\nnum_form = tk.Entry(window, font=(\"Arial\", 11), textvariable=guessed_num)\r\nnum_form.place(x=225, y=290)\r\n\r\nplay_button.place(x=270, y=97)\r\nguess_button.place(x=220, y=320)\r\n\r\nwindow.mainloop()\r\n","sub_path":"day 29 project task/numberGuessing.py","file_name":"numberGuessing.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"464469532","text":"import torch\nfrom fuel.datasets import H5PYDataset\nfrom torch.utils.data import Dataset\nimport h5py\n\nclass MujocoPusher3DofDataset(Dataset):\n \"\"\"loads the mujoco h5 recording file and makes it available for pytorch training\"\"\"\n\n def __init__(self, h5_file, for_training=True):\n \"\"\"\n Args:\n h5_file (string): Path to the h5 file\n for_training (bool): True if you want the training dataset, otherwise you get the testing split\n \"\"\"\n super(MujocoPusher3DofDataset, self).__init__()\n self.f = h5py.File(h5_file, \"r\")\n phase = \"train\"\n if not for_training:\n phase = \"valid\"\n self.f = H5PYDataset(h5_file, which_sets=(phase,))\n import ipdb; ipdb.set_trace()\n\n def __len__(self):\n return self.f.num_examples\n\n def __getitem__(self, idx):\n handle = self.f.open()\n #import ipdb; ipdb.set_trace()\n data = self.f.get_data(handle, slice(idx, idx + 1))\n\n # items:\n # 0-6 - joint angles\n # 7-13 - joint velocities\n # 14-16 - tip position (x,y,z)\n # 17-19 - obj position (x,y,z)\n # 20-22 - goal position (x,y,z)\n relevant_items = range(14) # both 7 angles and 7 velocities\n episode = {#'state_joints': torch.from_numpy(data[2][0][:, relevant_items]),\n # 'state_img': self._totensor(data[1][0]),\n # 'action': torch.from_numpy(data[0][0]),\n 'state_next_sim_joints': torch.from_numpy(data[8][0][:, relevant_items]),\n # 'state_next_sim_img': self._totensor(data[7][0]),\n 'state_next_real_joints': torch.from_numpy(data[4][0][:, relevant_items])\n # 'state_next_real_img': self._totensor(data[3][0])\n }\n\n self.f.close(handle)\n\n return episode\n\nif __name__ == '__main__':\n ms1d = MujocoPusher3DofDataset(\"/data/lisa/data/sim2real/mujoco_data2_pusher.h5\")\n print (\"loaded dataset with {} episodes\".format(len(ms1d)))\n sample = ms1d[0]\n state_next_sim_joints = sample[\"state_next_sim_joints\"]\n state_next_real_joints = sample[\"state_next_real_joints\"]\n\n print (state_next_sim_joints.size())\n print (state_next_real_joints.size())\n\n print(state_next_sim_joints[100:103])\n print(state_next_real_joints[100:103])\n","sub_path":"neural_augmented_simulator/old-code/simple_joints_lstm/mujoco_dataset_pusher3dof.py","file_name":"mujoco_dataset_pusher3dof.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"129368614","text":"import math\nfrom PyQt4 import QtGui\nfrom .utils import Object\n\n\n__author__ = \"Yuehao Wang\"\n\n\nclass Point(Object):\n\tdef __init__(self, x = 0, y = 0):\n\t\tsuper(Point, self).__init__()\n\n\t\tif not isinstance(x, (int, float)):\n\t\t\tx = 0\n\t\tif not isinstance(y, (int, float)):\n\t\t\ty = 0\n\t\t\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef __str__(self):\n\t\treturn \"Point(%s, %s)\" % (self.x, self.y)\n\n\tdef distance(p1, p2):\n\t\treturn Point.distance2(p1.x, p1.y, p2.x, p2.y)\n\n\tdef distance2(x1, y1, x2, y2):\n\t\tx = x1 - x2\n\t\ty = y1 - y2\n\n\t\treturn math.sqrt(x * x + y * y)\n\n\tdef interpolate(p1, p2, f):\n\t\treturn Point(p1.x + (p2.x - p1.x) * (1 - f), p1.y + (p2.y - p1.y) * (1 - f))\n\n\tdef polar(l, a):\n\t\treturn Point(l * math.cos(a), l * math.sin(a))\n\n\tdef length(self):\n\t\treturn Point.distance2(self.x, self.y, 0, 0)\n\n\tdef add(self, v):\n\t\treturn Point(self.x + v.x, self.y + v.y)\n\n\tdef setTo(self, x, y):\n\t\tif not isinstance(x, (int, float)):\n\t\t\tx = 0\n\t\tif not isinstance(y, (int, float)):\n\t\t\ty = 0\n\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef copyFrom(self, s):\n\t\tself.setTo(s.x, s.y)\n\n\tdef equals(self, t):\n\t\treturn self.x == t.x and self.y == t.y\n\n\tdef normalize(self, t):\n\t\tscale = t / self.length()\n\n\t\tself.x *= scale\n\t\tself.y *= scale\n\n\tdef offset(self, dx, dy):\n\t\tself.x += dx\n\t\tself.y += dy\n\n\tdef substract(self, v):\n\t\treturn Point(self.x - v.x, self.y - v.y)\n\n\nclass Rectangle(Object):\n\tdef __init__(self, x = 0, y = 0, w = 0, h = 0):\n\t\tsuper(Rectangle, self).__init__()\n\n\t\tif not isinstance(x, (int, float)):\n\t\t\tx = 0\n\t\tif not isinstance(y, (int, float)):\n\t\t\ty = 0\n\t\tif not isinstance(w, (int, float)):\n\t\t\tw = 0\n\t\tif not isinstance(h, (int, float)):\n\t\t\th = 0\n\t\t\n\t\tself.left = 0\n\t\tself.right = 0\n\t\tself.top = 0\n\t\tself.bottom = 0\n\t\tself.__x = x\n\t\tself.__y = y\n\t\tself.__width = w\n\t\tself.__height = h\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = w\n\t\tself.height = h\n\n\t@property\n\tdef x(self):\n\t\treturn self.__x\n\n\t@x.setter\n\tdef x(self, v):\n\t\tself.__x = v\n\n\t\tself.left = v\n\t\tself.right = v + self.width\n\n\t@property\n\tdef y(self):\n\t\treturn self.__y\n\n\t@y.setter\n\tdef y(self, v):\n\t\tself.__y = v\n\n\t\tself.top = v\n\t\tself.bottom = v + self.height\n\n\t@property\n\tdef width(self):\n\t\treturn self.__width\n\n\t@width.setter\n\tdef width(self, v):\n\t\tself.__width = v\n\n\t\tself.right = v + self.x\n\n\t@property\n\tdef height(self):\n\t\treturn self.__height\n\n\t@height.setter\n\tdef height(self, v):\n\t\tself.__height = v\n\n\t\tself.bottom = v + self.y\n\n\tdef contains(self, x, y):\n\t\treturn self.x <= x <= self.right and self.y <= y <= self.bottom\n\n\tdef containsRect(self, rect):\n\t\treturn rect.x >= self.x and rect.right <= self.right and rect.y >= self.y and rect.bottom <= self.bottom\n\t\t\n\tdef equals(self, v):\n\t\treturn v.x == self.x and v.width == self.width and v.y == self.y and v.height == self.height\n\n\tdef inflate(self, dx, dy):\n\t\tself.width += dx\n\t\tself.height += dy\n\n\tdef intersection(self, t):\n\t\tix = self.x if self.x > t.x else t.x\n\t\tiy = self.y if self.y > t.y else t.y\n\t\tax = t.right if self.right > t.right else self.right\n\t\tay = t.bottom if self.bottom > t.bottom else self.bottom\n\n\t\tif ix <= ax and iy <= ay:\n\t\t\treturn Rectangle(ix, iy, ax, ay)\n\t\telse:\n\t\t\treturn Rectangle(0, 0, 0, 0)\n\n\tdef intersects(self, 
t):\n\t\tix = self.x if self.x > t.x else t.x\n\t\tiy = self.y if self.y > t.y else t.y\n\t\tax = t.right if self.right > t.right else self.right\n\t\tay = t.bottom if self.bottom > t.bottom else self.bottom\n\n\t\treturn ix <= ax and iy <= ay\n\n\tdef isEmpty(self):\n\t\treturn self.x == 0 and self.y == 0 and self.width == 0 and self.height == 0\n\n\tdef offset(self, dx, dy):\n\t\tself.x += dx\n\t\tself.y += dy\n\n\tdef setEmpty(self):\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.width = 0\n\t\tself.height = 0\n\n\tdef setTo(self, x, y, w, h):\n\t\tif not isinstance(x, (int, float)):\n\t\t\tx = 0\n\t\tif not isinstance(y, (int, float)):\n\t\t\ty = 0\n\t\tif not isinstance(w, (int, float)):\n\t\t\tw = 0\n\t\tif not isinstance(h, (int, float)):\n\t\t\th = 0\n\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = w\n\t\tself.height = h\n\n\tdef union(self, t):\n\t\tx = t.x if self.x > t.x else self.x\n\t\ty = t.y if self.y > t.y else self.y\n\t\tr = self.right if self.right > t.right else t.right\n\t\tb = self.bottom if self.bottom > t.bottom else t.bottom\n\n\t\treturn Rectangle(x, y, r - x, b - y)  # convert the combined right/bottom back to width/height\n\n\nclass Matrix(Object):\n\tdef __init__(self, a = None, b = None, c = None, d = None, tx = None, ty = None, u = None, v = None, w = None):\n\t\tsuper(Matrix, self).__init__()\n\n\t\tself.a = 1\n\t\tself.b = 0\n\t\tself.u = 0\n\t\tself.c = 0\n\t\tself.d = 1\n\t\tself.v = 0\n\t\tself.tx = 0\n\t\tself.ty = 0\n\t\tself.w = 1\n\t\t\n\t\tself.setTo(a, b, c, d, tx, ty, u, v, w)\n\n\tdef setTo(self, a = None, b = None, c = None, d = None, tx = None, ty = None, u = None, v = None, w = None):\n\t\tif a != None:\n\t\t\tself.a = a\n\n\t\tif b != None:\n\t\t\tself.b = b\n\n\t\tif c != None:\n\t\t\tself.c = c\n\n\t\tif d != None:\n\t\t\tself.d = d\n\n\t\tif tx != None:\n\t\t\tself.tx = tx\n\n\t\tif ty != None:\n\t\t\tself.ty = ty\n\n\t\tif u != None:\n\t\t\tself.u = u\n\n\t\tif v != None:\n\t\t\tself.v = v\n\n\t\tif w != None:\n\t\t\tself.w = w\n\n\tdef isDentity(self):\n\t\treturn (self.a == 1 and self.b == 0 and self.c == 0 and self.d == 1 and self.tx == 0 and self.ty == 0 and self.u == 0 and self.v == 0 and self.w == 1)\n\n\tdef transform(self, c):\n\t\tc.setTransform(self.toQTransform(), True)\n\n\tdef identity(self):\n\t\tself.setTo(1, 0, 0, 1, 0, 0, 0, 0, 1)\n\t\n\tdef rotate(self, q):\n\t\tradian = q * math.pi / 180\n\t\tcos = math.cos(radian)\n\t\tsin = math.sin(radian)\n\t\t\n\t\tmtx = Matrix(cos, sin, -sin, cos, 0, 0, 0, 0, 1)\n\t\tself.add(mtx)\n\n\t\treturn self\n\n\tdef scale(self, sx, sy):\n\t\tmtx = Matrix(sx, 0, 0, sy, 0, 0, 0, 0, 1)\n\t\tself.add(mtx)\n\t\t\n\t\treturn self\n\n\tdef translate(self, tx, ty):\n\t\tmtx = Matrix(1, 0, 0, 1, tx, ty, 0, 0, 1)\n\t\tself.add(mtx)\n\n\t\treturn self\n\n\tdef skew(self, kx, ky):\n\t\tmtx = Matrix(1, ky, kx, 1, 0, 0, 0, 0, 1)\n\t\tself.add(mtx)\n\n\t\treturn self\n\n\tdef add(self, mtx):\n\t\ta = self.a * mtx.a + self.b * mtx.c + self.u * mtx.tx\n\t\tb = self.a * mtx.b + self.b * mtx.d + self.u * mtx.ty\n\t\tu = self.a * mtx.u + self.b * mtx.v + self.u * mtx.w\n\t\tc = self.c * mtx.a + self.d * mtx.c + self.v * mtx.tx\n\t\td = self.c * mtx.b + self.d * mtx.d + self.v * mtx.ty\n\t\tv = self.c * mtx.u + self.d * mtx.v + self.v * mtx.w\n\t\ttx = self.tx * mtx.a + self.ty * mtx.c + self.w * mtx.tx\n\t\tty = self.tx * mtx.b + self.ty * mtx.d + self.w * mtx.ty\n\t\tw = self.tx * mtx.u + self.ty * mtx.v + self.w * mtx.w\n\t\t\n\t\tself.setTo(a, b, c, d, tx, ty, u, v, w)\n\n\tdef toArray(self, mtx):\n\t\tif isinstance(mtx, list) and len(mtx) == 3:\n\t\t\tm = mtx[0] * self.a + mtx[1] * self.c + mtx[2] * self.tx\n\t\t\tn 
= mtx[0] * self.b + mtx[1] * self.d + mtx[2] * self.ty\n\t\t\tk = mtx[0] * self.u + mtx[1] * self.v + mtx[2] * self.w\n\t\t\t\n\t\t\treturn [m, n, k]\n\t\telse:\n\t\t\ta = self.a * mtx.a + self.b * mtx.c + self.u * mtx.tx\n\t\t\tb = self.a * mtx.b + self.b * mtx.d + self.u * mtx.ty\n\t\t\tu = self.a * mtx.u + self.b * mtx.v + self.u * mtx.w\n\t\t\tc = self.c * mtx.a + self.d * mtx.c + self.v * mtx.tx\n\t\t\td = self.c * mtx.b + self.d * mtx.d + self.v * mtx.ty\n\t\t\tv = self.c * mtx.u + self.d * mtx.v + self.v * mtx.w\n\t\t\ttx = self.tx * mtx.a + self.ty * mtx.c + self.w * mtx.tx\n\t\t\tty = self.tx * mtx.b + self.ty * mtx.d + self.w * mtx.ty\n\t\t\tw = self.tx * mtx.u + self.ty * mtx.v + self.w * mtx.w\n\t\t\t\n\t\t\treturn [a, b, c, d, tx, ty, u, v, w]\n\n\tdef toList(self, mtx):\n\t\treturn self.toArray(mtx)\n\n\tdef toQTransform(self):\n\t\treturn QtGui.QTransform(self.a, self.b, self.c, self.d, self.tx, self.ty)\n\n\nclass Transform(Object):\n\tdef __init__(self):\n\t\tsuper(Transform, self).__init__()\n\n\t\tself.matrix = None","sub_path":"pylash/geom.py","file_name":"geom.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"565859091","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0002_auto_20161029_2338'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Check_list',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255, verbose_name=b'Name')),\n ('category', models.CharField(max_length=255, verbose_name=b'Category', blank=True)),\n ],\n ),\n migrations.RemoveField(\n model_name='impression',\n name='book',\n ),\n migrations.DeleteModel(\n name='Book',\n ),\n migrations.DeleteModel(\n name='Impression',\n ),\n ]\n","sub_path":"DjangoWebProject4/app/migrations/0003_auto_20161031_1310.py","file_name":"0003_auto_20161031_1310.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"346548272","text":"from bookut import extremum,mean\nfrom exut import ex\n\n# 分析极值分布\ndef extremum1():\n ex.InitSht()\n ex.LoopIdx = 3\n extremum.amplitude = 0.02\n while True:\n str1,i = ex.BookSpeNext(7)\n print(\"----- 行数\",i,str1)\n if str1 == None or str1 == '':\n break\n vv = StrToMean(str1)\n extremum.PutNewV(i,vv)\n\n if ex.LoopIdx >= 30000:\n break\n\n extremum.DumpHisty()\n\n\ndef StrToMean(ss):\n strs = ss.split(\",\")\n for i in range(2):\n strs[i] = float(strs[i])\n me = mean.mean(strs[0],strs[1])\n return me\nextremum1()\n\n\n\n\n\n\n","sub_path":"Alchemy/analyEx.py","file_name":"analyEx.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"472743621","text":"# -*- coding: utf-8 -*-\n# @Time : 2019-07-21 16:21\n# @Author : jesse\n# @File : python02.线程数据共享.py\n\n'''\n演示主线程和子线程之间数据是共享的,无需通过队列或者管道来共享数据.\n\n'''\n\nfrom threading import Thread\n\ndef func():\n global g\n g += 1\n print(g)\n\n\ng = 100\nt_list = []\n\nfor i in range(10):\n t = Thread(target=func)\n t.start()\n\nfor t in t_list:\n t.join()\n\nprint(g)\n","sub_path":"python05-network_program/全栈9期day39-线程/python02.线程数据共享.py","file_name":"python02.线程数据共享.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"368763838","text":"__author__ = 'byeungchun'\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 02 17:16:32 2015\n\n@author: by003457\n\"\"\"\n\nimport sys\nimport datetime\nimport pandas as pd\nimport scipy\nsys.path.append('./gexf')\n\nfrom gexf import Gexf\n\ndef test1():\n gf = Gexf(\"BIS LBS_D\",\"Hello LBS_D\")\n graph = gf.addGraph(\"directed\",\"static\",\"Hello LBS_D graph\")\n graph.addNodeAttribute(\"continent\",type=\"string\")\n graph.addEdgeAttribute(\"CbsBankingType\", defaultValue=\"DomesticBankAll\", type=\"string\")\n graph.addEdgeAttribute(\"CbsReportingBasis\",defaultValue=\"ImmediateRiskBasis\",type=\"string\")\n graph.addEdgeAttribute(\"BalanceSheetPosition\",defaultValue=\"LocalClaims\",type=\"string\")\n\n graph.addNode(\"NL\",\"Netherlands\")\n graph._nodes[\"NL\"].addAttribute(\"0\",\"Europe\")\n graph.addNode(\"ZA\",\"South Africa\")\n graph.addEdge(\"NLZA001\",\"NL\",\"ZA\")\n graph._edges[\"NLZA001\"].addAttribute(\"0\",\"DomesticBankAll\")\n graph._edges[\"NLZA001\"].addAttribute(\"1\",\"ImmediateRiskBasis\")\n graph._edges[\"NLZA001\"].addAttribute(\"2\",\"LocalClaims\")\n of = open(\"lbsDt2.gexf\",\"bw\")\n gf.write(of)\n of.close()\n return graph\n\nclass CbsPubEty:\n def __init__(self):\n self.arrDocs = []\n self.arrNetworks = []\n self.arrNodes = []\n self.cntryCoord = pd.read_csv('countryCoordinates.csv',sep='\\t',index_col = 'country')\n self.arrCountries = ['AT','BE','BG','CA','CH','CN','CY','CZ','DE','DK','EE','ES','FI','FR','GB','GR','HK','HR','HU','ID','IE','IL','IN','IS','IT','JP','KR','LT','LU','LV','MK','MT','MY','NL','NO','PH','PL','PT','RO','RU','SE','SG','SI','SK','TH','TR','US']\n self.cbsGf = Gexf(\"BIS_CBS\",\"Hello BIS_CBS\")\n self.excelfile = 'BY003457_2015_07_14_16_43_16_20.xlsx'\n self.cbsGraph = self.cbsGf.addGraph(\"directed\",\"dynamic\",\"BIS_CBS\",timeformat=\"date\")\n \n self.cbsGraph.addNodeAttribute(\"latitude\",0.0,type=\"float\",mode=\"static\",force_id=\"latitude\") \n self.cbsGraph.addNodeAttribute(\"longitude\",0.0,type=\"float\",mode=\"static\",force_id=\"longitude\") \n self.cbsGraph.addEdgeAttribute(\"weight\",0.0, type=\"float\",mode=\"dynamic\",force_id=\"weight\")\n \n #print('attribute id ' + self.eAttrValue)\n \n\n\n def insertDocData(self,arrRow):\n _dicDoc = {}\n _dicDoc['cbs_data_set'] = arrRow[0]\n _dicDoc['code'] = arrRow[0]+':Q'+arrRow[1]\n _dicDoc['start_date'] = arrRow[2]\n _dicDoc['end_date'] = arrRow[3]\n _dicDoc['frequency'] = arrRow[4]\n _dicDoc['measure'] = arrRow[5]\n _dicDoc['reporting_country'] = arrRow[6]\n _dicDoc['cbs_bank_type'] = arrRow[7]\n _dicDoc['cbs_reporting_basis'] = arrRow[8]\n _dicDoc['balance_sheet_position'] = arrRow[9]\n _dicDoc['type_of_instruments'] = arrRow[10]\n _dicDoc['remaining_maturity'] = arrRow[11]\n _dicDoc['currency_type_of_booking_location'] = arrRow[12]\n _dicDoc['counterparty_sector'] = arrRow[13]\n _dicDoc['counterparty_country'] = arrRow[14]\n self.arrDocs.append(_dicDoc)\n\n def genValueData(self,excelfile):\n df = pd.read_excel(excelfile,sheetname='Quarterly Series')\n df.columns = df.ix[1,:]\n df = df.ix[2:,:]\n df.index = df.ix[:,0]\n df = df.ix[:,1:]\n return df\n\n def genDocData(self,excelfile):\n df = pd.read_excel(excelfile,sheetname = 'Summary Documentation')\n for i in range(df.shape[0]):\n arrRow = df.ix[i,:].tolist()\n self.insertDocData(arrRow)\n\n def addCountryToNodes(self,iso2, cntry,_frDate,_toDate):\n try:\n self.arrNodes.index(iso2)\n except:\n self.arrNodes.append(iso2)\n 
#self.cbsGraph.addNode(iso2,cntry,start=_frDate,end=_toDate)\n #_node = self.cbsGraph.addNode(iso2,cntry,start=_frDate)\n _node = self.cbsGraph.addNode(iso2,cntry,start=\"1984-01-31\")\n _node.addAttribute(\"latitude\",str(float(self.cntryCoord[self.cntryCoord.index == iso2].latitude)))\n _node.addAttribute(\"longitude\",str(float(self.cntryCoord[self.cntryCoord.index == iso2].longitude)))\n\n\n def genNetworkData(self,excelfile):\n self.genDocData(excelfile)\n dfVal = self.genValueData(excelfile)\n for i in range(len(self.arrDocs)):\n _df= dfVal[self.arrDocs[i]['code']].dropna()\n _frIso = self.arrDocs[i]['code'].split(':')[3]\n _toIso = self.arrDocs[i]['code'].split(':')[-1]\n try:\n self.arrCountries.index(_frIso)\n self.arrCountries.index(_toIso)\n _from = self.arrDocs[i]['reporting_country']\n self.addCountryToNodes(_frIso,_from,_df.index[0].strftime('%Y-%m-%d'),_df.index[-1].strftime('%Y-%m-%d'))\n _to = self.arrDocs[i]['counterparty_country']\n self.addCountryToNodes(_toIso,_to,_df.index[0].strftime('%Y-%m-%d'),_df.index[-1].strftime('%Y-%m-%d'))\n except:\n continue\n print(_frIso+_toIso,_frIso,_toIso)\n _edge = self.cbsGraph.addEdge(_frIso+_toIso,_frIso,_toIso)\n for j in range(_df.shape[0]):\n if j == 0:\n _frDate = (_df.index[j] - datetime.timedelta(3*365/12) + datetime.timedelta(1)).strftime('%Y-%m-%d')\n _toDate = _df.index[j].strftime('%Y-%m-%d')\n else:\n _frDate = _df.index[j-1].strftime('%Y-%m-%d')\n _toDate = _df.index[j].strftime('%Y-%m-%d')\n _val = _df.ix[j]\n try:\n float(_val)\n except:\n continue\n #print(_frIso+_toIso,_val)\n _edge.addAttribute(\"weight\" ,str(_val),start=_frDate, end=_toDate)\n #self.cbsGraph.addEdge(_frIso+_toIso+str(j),_frIso,_toIso,weight=_val,start=_frDate, end=_toDate)\n\n cbsFile = open('cbsNw2.gexf','bw')\n self.cbsGf.write(cbsFile)\n cbsFile.close()\n\n\nexcelfile= 'BY003457_2015_07_14_16_43_16_20.xlsx'\ncpe = CbsPubEty()\ncpe.genNetworkData(excelfile)\n","sub_path":"genGexf.py","file_name":"genGexf.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"492317440","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTests for the simple cycle algorithm\n\"\"\"\nfrom simple_cycle import *\nfrom guo_hall import AlgBody as guo_body\nfrom blur import AlgBody as blur_body\nfrom adaptive import AlgBody as adaptive_body\nimport networkx as nx\nimport cv2\nimport unittest\n\n__authors__ = {\"Andreas Firczynski\": \"andreasfir91@googlemail.com\"}\n\n\nclass simple_cycle_test(unittest.TestCase):\n\n def test_instantiation(self):\n \"\"\"\n Test the instantiation.\n \"\"\"\n alg = AlgBody()\n self.assertEqual(alg.name, \"Simple cycle filter\")\n self.assertEqual(alg.parent, \"Graph filtering\")\n\n def test_process(self):\n alg= AlgBody()\n\n #Detect the graph from an image\n pp_alg = blur_body()\n seg_alg = adaptive_body()\n gd_alg = guo_body()\n img = cv2.imread(\"p_polycephalum.jpg\")\n graph = \"\"\n pp_alg.process([img,graph])\n seg_alg.process([pp_alg.result['img'],pp_alg.result['graph']])\n gd_alg.process([seg_alg.result['img'],seg_alg.result['graph']])\n\n alg.process([gd_alg.result['img'],gd_alg.result['graph']])\n\n #Should be\n graph = self.should_alg(gd_alg.result['graph'])\n\n self.assertEqual(alg.result['graph'],graph)\n\n def should_alg(self,arg):\n\n nodes_not_in_a_cycle = set(arg.nodes())\n # filter all nodes which are not in a biconnected component\n for component in nx.biconnected_components(arg):\n if len(component) > 2:\n nodes_not_in_a_cycle -= component\n # remove all nodes which are not in a biconnected component from\n # the graph\n arg.remove_nodes_from(nodes_not_in_a_cycle)\n return arg\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"nefi2/unittests/unittest_model/unittest_algorithms/simple_cycle_test.py","file_name":"simple_cycle_test.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"394444450","text":"# Usage: python eval.py \nimport sys, os, re, importlib\n# print(\"command line arguments:\", sys.argv[:])\nprogram_name = sys.argv[0]\ntemplatefile = sys.argv[1]\nif not re.search(\"\\.py$\", templatefile):\n templatefile = templatefile + \".py\"\nif not os.path.exists(templatefile):\n raise Exception(\"%s:No such templatefile file exists\" % templatefile)\n sys.exit()\n# print(\"templatefile:\", templatefile)\n# pckj = \"/\".join(templatefile.split(\"/\")[:-1])\n# os.chdir(pckj)\n# print(\"templatefile:\", templatefile)\ntemplatefile = templatefile.split(\"/\")[-1]\ntemplatefile = templatefile.replace(\".py\", \"\")\n# print(\"templatefile:\", templatefile)\nmodule_name = importlib.import_module(templatefile)\nallfiles = sys.argv[2:]\n# print(\"program_name:\", program_name)\n# print(\"templatefile:\", templatefile)\n# print(\"allfiles:\", allfiles)\n# print(\"execution directory:\", os.getcwd())\nkey_value, debugging_values = module_name.init_variables()\n# print(key_value)\nfor filename in allfiles:\n if os.path.exists(filename):\n # print()\n # print(\"filename:\", filename)\n file = open(filename, \"r\")\n # print(\"for filename \", filename, \":\")\n for line in file:\n content = line.split(\":\", maxsplit=1)\n # print(\"content:\", content)\n # print(\"content[0]:\", content[0])\n # if content[0] == \"episode\":\n # key_value, debugging_values = module_name.episode(key_value, content[1])\n try:\n func = getattr(module_name, content[0])\n key_value, debugging_values = func(key_value, content[1])\n # print(\"debugging_values for %s:\" % content[0], debugging_values)\n # if content[0] == \"exercise\" or content[0] == \"water\":\n # print(\"debugging_values for %s:\" % content[0], debugging_values)\n except:\n # raise Exception(\"No such function\")\n # print(\"%s:no such function\" % content[0])\n pass\n# print(key_value)\ndict(map(lambda x: (x[0], x[1].remove(\"NA\") if len(x[1]) > 1 and \"NA\" in x[1] else x[1]), key_value.items()))\n# print(key_value)\n# print(\"key_value[\\\"song\\\"]\", key_value[\"song\"])\nkey_value = dict(map(lambda x: (x[0], \", \".join(str(y) for y in x[1])), key_value.items()))\n# print(key_value)\n# print(key_value['episode'])\nfor x in key_value.items():\n # print(x[1])\n # print(list(\", \".join(str(y) for y in x[1])))\n print()\n print(\"%s: %s\"%(x[0], x[1]))\n # pass\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"271916957","text":"import time\n\nimport numpy as np\nimport pandas as pd\nfrom bayes_opt import BayesianOptimization\n\nfrom oolearning.model_processors.CloneableFactory import CloneableFactory\nfrom oolearning.evaluators.CostFunctionMixin import CostFunctionMixin\nfrom oolearning.evaluators.UtilityFunctionMixin import UtilityFunctionMixin\nfrom oolearning.model_processors.BayesianOptimizationTunerResults import BayesianOptimizationTunerResults\nfrom oolearning.model_processors.ModelTunerBase import ModelTunerBase\nfrom oolearning.model_processors.ResamplerBase import ResamplerBase\nfrom oolearning.model_processors.TunerResultsBase import TunerResultsBase\nfrom oolearning.model_wrappers.HyperParamsBase import HyperParamsBase\n\n\nclass BayesianOptimizationModelTuner(ModelTunerBase):\n # noinspection SpellCheckingInspection\n \"\"\"\n A BayesianOptimizationModelTuner is a wrapper around around github/fmfn/BayesianOptimization\n (https://github.com/fmfn/BayesianOptimization) that searches for the\n uses a Resampler for tuning a single model across various hyper-parameters.\n In other words, it runs a specified Resampler repeatedly over a combination of hyper-parameters,\n finding the \"best\" potential model as well as related information.\n \"\"\"\n def __init__(self,\n resampler: ResamplerBase,\n hyper_param_object: HyperParamsBase,\n parameter_bounds: dict,\n init_points: int,\n n_iter: int,\n verbose: int = 2,\n seed: int = 42\n ):\n \"\"\"\n :param resampler:\n :param hyper_param_object:\n \"\"\"\n super().__init__()\n\n assert isinstance(resampler, ResamplerBase)\n\n self._resampler_factory = CloneableFactory(resampler)\n self._hyper_param_factory = CloneableFactory(hyper_param_object)\n self._parameter_bounds = parameter_bounds\n self._init_points = init_points\n self._n_iter = n_iter\n self._verbose = verbose\n self._seed = seed\n\n def _tune(self, data_x: pd.DataFrame, data_y: np.ndarray) -> TunerResultsBase:\n\n global resampler_results\n resampler_results = list()\n\n global resampler_times\n resampler_times = list()\n\n global temp_hyper_params\n temp_hyper_params = list()\n\n # need global functions otherwise I get \"function not defined\" in `optimizer.maximize()`\n # noinspection PyGlobalUndefined\n global temp_objective_function\n\n # noinspection PyUnusedLocal,PyRedeclaration\n def temp_objective_function(locals_dictionary: dict):\n # this will be passed in a diction from `locals()` call in the dynamic objective function which\n # will contain a dictionary of parameters with the corresponding values, which is exactly what\n # `update_dict()` takes\n\n # if(locals_dictionary is None):\n # return None\n\n local_hyper_params = self._hyper_param_factory.get()\n local_hyper_params.update_dict(locals_dictionary)\n temp_hyper_params.append(local_hyper_params)\n\n local_resampler = self._resampler_factory.get()\n\n resample_start_time = time.time()\n local_resampler.resample(data_x=data_x, data_y=data_y, hyper_params=local_hyper_params)\n resampler_times.append(time.time() - resample_start_time)\n resampler_results.append(local_resampler.results)\n\n first_score_object = local_resampler.results.scores[0][0]\n\n assert isinstance(first_score_object, CostFunctionMixin) or \\\n isinstance(first_score_object, UtilityFunctionMixin)\n\n resample_mean = local_resampler.results.score_means[first_score_object.name]\n\n # if the first score object passed in to the resampler is a Cost Function, we want to **Minimize**\n # the score, so we need to multiply it by negative 1 since we 
the optimizer maximizes\n            if isinstance(local_resampler.results.scores[0][0], CostFunctionMixin):\n                resample_mean = resample_mean * -1\n\n            return resample_mean\n\n        parameter_names = list(self._parameter_bounds.keys())\n\n        objective_parameters = \", \".join(parameter_names)\n        objective_function_string = \"global objective_function\\n\" \\\n                                    \"def objective_function({0}): return temp_objective_function(locals())\"\n        objective_function_string = objective_function_string.format(objective_parameters)\n        exec(objective_function_string)\n\n        # noinspection PyUnresolvedReferences\n        optimizer = BayesianOptimization(f=objective_function,\n                                         pbounds=self._parameter_bounds,\n                                         verbose=self._verbose,\n                                         random_state=self._seed)\n        optimizer.maximize(init_points=self._init_points, n_iter=self._n_iter)\n\n        assert len(resampler_results) == self._n_iter + self._init_points\n\n        # ensure the target values equal the mean resampled scores\n        # if the score object is a CostFunctionMixin, we must multiply by -1 here, since we multiplied by -1\n        # above so that we could optimize for the smallest value (e.g. RMSE)\n        multiplier = -1 if isinstance(resampler_results[0].scores[0][0], CostFunctionMixin) else 1\n        assert [multiplier * ob['target'] for ob in optimizer.res] ==\\\n            [result.score_means[resampler_results[0].scores[0][0].name]\n             for result in resampler_results]\n\n        # the hyper-params of each resampler\n        all_params_every_resampler = [params_object.params_dict for params_object in temp_hyper_params]\n        # extract the params corresponding to the parameters we are optimizing according to parameter_bounds\n        optimizer_params_dict = [{x: the_dict[x] for x in parameter_names\n                                  if x in the_dict} for the_dict in all_params_every_resampler]\n\n        return BayesianOptimizationTunerResults(resampler_results=resampler_results,\n                                                hyper_params_combos=optimizer_params_dict,\n                                                resampler_times=[str(round(x, 1)) + \" Seconds\" for x in resampler_times],  # noqa\n                                                optimizer=optimizer)\n","sub_path":"oolearning/model_processors/BayesianOptimizationModelTuner.py","file_name":"BayesianOptimizationModelTuner.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"215000648","text":"#Exercise: Assignment-2\n#Implement the updateHand function. Make sure this function has no side\n#effects: i.e., it must not mutate the hand passed in. Before pasting your\n#function definition here, be sure you've passed the appropriate tests in\n#test_ps4a.py.\n\n\ndef updateHand(hand, word):\n \"\"\"\n Assumes that 'hand' has all the letters in word.\n In other words, this assumes that however many times\n a letter appears in 'word', 'hand' has at least as\n many of that letter in it. \n\n Updates the hand: uses up the letters in the given word\n and returns the new hand, without those letters in it.\n\n Has no side effects: does not modify hand.\n\n word: string\n hand: dictionary (string -> int) \n returns: dictionary (string -> int)\n \"\"\"\n # TO DO ... <-- Remove this comment when you code this function\n updatehand = dict(hand)\n for letter in range(len(word)):\n if word[letter] in updatehand.keys():\n updatehand[word[letter]]-=1\n re = updatehand\n return re\n \n\ndef main():\n n=input()\n adict={}\n #print('adict :',adict)\n for i in range(int(n)):\n \n data = input()\n #print('data :', data)\n l = data.split()\n #print('l :', l)\n #print(adict)\n adict[l[0]] = int(l[1])\n #print('adict :', adict)\n #print()\n data1 = input()\n print(updateHand(adict,data1))\n \n\n\nif __name__== \"__main__\":\n main()","sub_path":"day_11/p2 (9)/p2/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"427088831","text":"#!/usr/bin/env python3\n\nimport argparse\nimport io\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport unittest\n\nimport sym\n\n\nclass MupTests(unittest.TestCase):\n def setUp(self):\n self.tempdir = tempfile.mkdtemp()\n self.namespace = argparse.Namespace()\n self.namespace.pretend = False\n self.namespace.absolute = False\n self.namespace.relative = False\n\n def tearDown(self):\n 1 or shutil.rmtree(self.tempdir)\n\n def test_find(self):\n os.mkdir(self._path(\"1\"))\n os.symlink(\"a\", self._path(\"1/2\"))\n mtime_1 = self._mtime(\"1\")\n mtime_1_2 = self._mtime(\"1/2\")\n\n self.namespace.regex = True\n self.namespace.sub = [r\"prefix/\\1\"]\n self.namespace.directory = self.tempdir\n self.namespace.pattern = [\"(.*)\"]\n setattr(self.namespace, \"0\", False)\n code, out = getoutput(sym.Find().run, self.namespace)\n\n self.assertEqual(\n out, \"ln -s prefix/a {} [previously a]\".format(self._path(\"1/2\")))\n self.assertEqual(os.readlink(self._path(\"1/2\")), \"prefix/a\")\n self.assertEqual(mtime_1, self._mtime(\"1\"))\n self.assertEqual(mtime_1_2, self._mtime(\"1/2\"))\n\n def test_mirror(self):\n makepaths(self.tempdir, \"a/1/2 a/1/3 a/1/4\")\n\n self.namespace.src_dir = self._path(\"a\")\n self.namespace.dst_dir = self._path(\"b\")\n code, out = getoutput(sym.Mirror().run, self.namespace)\n\n self.assertEqual(out, \"\\n\".join([\"ln -s {} {}\"] * 3).format(\n self._path(\"a/1/2\"), self._path(\"b/1/2\"), self._path(\"a/1/3\"),\n self._path(\"b/1/3\"), self._path(\"a/1/4\"), self._path(\"b/1/4\")))\n self.assertEqual(self._mtime(\"a/1\"), self._mtime(\"b/1\"))\n self.assertEqual(self._mtime(\"a\"), self._mtime(\"b\"))\n self.assertEqual(os.readlink(self._path(\"b/1/2\")), self._path(\"a/1/2\"))\n\n def test_reverse(self):\n makepaths(self.tempdir, \"file\")\n os.symlink(self._path(\"file\"), self._path(\"link\"))\n mtime_dir = self._mtime(os.curdir)\n mtime_file = self._mtime(\"file\")\n\n self.namespace.symlink_path = [self._path(\"link\")]\n code, out = getoutput(sym.Reverse().run, self.namespace)\n\n self.assertEqual(out, \"mv {} {}\\nln -s {} {}\".format(\n self._path(\"file\"), self._path(\"link\"), self._path(\"link\"),\n self._path(\"file\")))\n self.assertEqual(os.readlink(self._path(\"file\")), self._path(\"link\"))\n self.assertEqual(mtime_file, self._mtime(\"file\"))\n self.assertEqual(mtime_file, self._mtime(\"link\"))\n self.assertEqual(mtime_dir, self._mtime(os.curdir))\n\n def _path(self, path):\n return os.path.join(self.tempdir, path)\n\n def _mtime(self, path):\n return os.stat(self._path(path), follow_symlinks=False).st_mtime_ns\n\n\ndef makepaths(root, paths):\n for path in paths.split():\n dirname, filename = os.path.split(path)\n dirpath = os.path.join(root, dirname)\n if not os.path.isdir(dirpath):\n os.makedirs(dirpath)\n if filename:\n filepath = os.path.join(dirpath, filename)\n with open(filepath, \"w\") as fileout:\n fileout.write(\"%s\\n\" % filepath)\n\n\ndef getoutput(func, *args, **kwargs):\n stdout = sys.stdout\n stderr = sys.stderr\n try:\n sys.stdout = io.StringIO()\n sys.stderr = sys.stdout\n try:\n func(*args, **kwargs)\n code = None\n except SystemExit as exception:\n code = exception.code\n return code, sys.stdout.getvalue().strip()\n finally:\n sys.stdout = stdout\n sys.stderr = stderr\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"src/2016/symlink/sym_test.py","file_name":"sym_test.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"69655098","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox as mb\nimport sqlite3\n# Coded by miormist :)\n\nclass Note:\n\n#-----------Defining section------------------\n\n def __init__(self,root):\n self.root=root\n self.root.title('Notemaker Application')\n self.root.geometry('1350x700+0+0')\n\n\n title=Label(self.root,text='Notemaker Application', font=('times new roman',25,'bold'),fg='black')\n title.pack(side=TOP,fill=X)\n\n #-----------Data section------------------\n\n self.title_var=StringVar()\n self.note_var=StringVar()\n self.search_txt=StringVar()\n\n\n#-----------Manage section------------------\n\n Manage_Frame=Frame(self.root,bd=4,relief=RIDGE)\n Manage_Frame.place(x=20,y=70,width=650,height=560)\n\n m_title=Label(Manage_Frame,text='Manage Notes', font=('times new roman',20,'bold'),fg='black')\n m_title.grid(row=0,columnspan=2,pady=20)\n\n lbl_ntitle=Label(Manage_Frame,text='Title',font=('times new roman',15,'bold'),fg='black')\n lbl_ntitle.grid(row=1,column=0,pady=20,sticky='w')\n\n txt_ntitle=Entry(Manage_Frame, textvariable=self.title_var, font=('times new roman',15,'bold'),bd=5,fg='black',relief=GROOVE)\n txt_ntitle.grid(row=1,column=1,pady=20,padx=20,sticky='w')\n\n lbl_ntitle=Label(Manage_Frame,text='Note', font=('times new roman',15,'bold'),fg='black')\n lbl_ntitle.grid(row=2,column=0,pady=20,sticky='w')\n\n self.txt_note=Text(Manage_Frame,width=55,height=10, font=('times new roman',15,'bold'))\n self.txt_note.grid(row=2,column=1,pady=20,padx=20,sticky='w')\n\n#-----------Manage button section------------------\n\n Btn_Frame=Frame(Manage_Frame,bd=4)\n Btn_Frame.place(x=55,y=500,width=500)\n\n add_btn=Button(Btn_Frame,text='Add',command=self.add_notes,width=12,font=('times new roman',10,'bold'),fg='black').grid(row=0,column=0,padx=10,pady=10)\n upd_btn=Button(Btn_Frame,text='Update',width=12,command=self.update,font=('times new roman',10,'bold'),fg='black').grid(row=0,column=1,padx=10,pady=10)\n del_btn=Button(Btn_Frame,text='Delete',width=12,command=self.delete,font=('times new roman',10,'bold'),fg='black').grid(row=0,column=2,padx=10,pady=10)\n clr_btn=Button(Btn_Frame,text='Clear',command=self.clear,width=12,font=('times new roman',10,'bold'),fg='black').grid(row=0,column=3,padx=10,pady=10)\n\n\n\n\n#-----------Display section------------------\n\n Display_Frame=Frame(self.root,bd=4,relief=RIDGE)\n Display_Frame.place(x=700,y=70,width=600,height=560)\n\n d_title=Label(Display_Frame,text='Display Notes', font=('times new roman',20,'bold'),fg='black')\n d_title.grid(row=0,columnspan=2,pady=20)\n\n lbl_ntitle=Label(Display_Frame,text='Title', font=('times new roman',15,'bold'),fg='black')\n lbl_ntitle.grid(row=1,column=0,pady=20,sticky='w')\n\n txt_ser=Entry(Display_Frame,font=('times new roman',15,'bold'), textvariable=self.search_txt,bd=5,fg='black',relief=GROOVE)\n txt_ser.grid(row=1,column=1,pady=20,padx=20,sticky='w')\n\n#-----------Display button section------------------\n\n ser_btn=Button(Display_Frame,text='Search',command=self.search_data,width=12,font=('times new roman',10,'bold'),fg='black').grid(row=1,column=2,padx=10,pady=10)\n sho_btn=Button(Display_Frame,text='Show all',command=self.fetch_data,width=12,font=('times new roman',10,'bold'),fg='black').grid(row=1,column=3,padx=10,pady=10)\n\n#-----------Database section------------------\n\n con=sqlite3.connect('notes.db')\n cur=con.cursor()\n cur.execute('''create table if not exists note(id integer primary key autoincrement, title text,noted text)''')\n 
con.commit()\n con.close()\n\n#-----------Table section------------------\n\n Table_Frame=Frame(Display_Frame,bd=4)\n Table_Frame.place(x=25,y=150,width=550,height=360)\n\n scrool_v=Scrollbar(Table_Frame,orient=HORIZONTAL)\n scrool_y=Scrollbar(Table_Frame,orient=VERTICAL)\n self.Note_Table=ttk.Treeview(Table_Frame,columns=('ID','Title','Note'),xscrollcommand=scrool_v.set,yscrollcommand=scrool_y.set)\n scrool_v.pack(side=BOTTOM,fill=X)\n scrool_y.pack(side=RIGHT,fill=Y)\n scrool_v.config(command=self.Note_Table.xview)\n scrool_y.config(command=self.Note_Table.yview)\n self.Note_Table.heading('ID',text='ID')\n self.Note_Table.heading('Title',text='Title')\n self.Note_Table.heading('Note',text='Note')\n\n self.Note_Table['show']='headings'\n self.Note_Table.column('ID',width=50)\n self.Note_Table.column('Title',width=100)\n self.Note_Table.column('Note',width=500)\n self.Note_Table.pack(fill=BOTH,expand=1)\n self.Note_Table.bind('',self.get_cursor)\n self.fetch_data()\n\n\n#-----------Functions section------------------\n\n def add_notes(self):\n if self.title_var.get()==\"\" or self.txt_note.get('1.0',END)==\"\":\n mb.showwarning('Error','All fields are requided')\n else:\n con=sqlite3.connect('notes.db')\n cur=con.cursor()\n sqlite_query='''insert into note(title,noted) values(?,?)'''\n datas=(self.title_var.get(),self.txt_note.get('1.0',END))\n cur.execute(sqlite_query,datas)\n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n mb.showinfo('Success','Note has been Added')\n \n def fetch_data(self):\n\n con=sqlite3.connect('notes.db')\n cur=con.cursor()\n sqlite_query='''select id,title,noted from note'''\n cur.execute(sqlite_query)\n rows=cur.fetchall()\n if len(rows)!=0:\n self.Note_Table.delete(*self.Note_Table.get_children())\n for row in rows:\n self.Note_Table.insert('',END,values=row)\n con.commit()\n else:\n self.Note_Table.delete(*self.Note_Table.get_children())\n con.close()\n self.search_txt.set('')\n\n def clear(self):\n self.title_var.set('')\n self.txt_note.delete('1.0',END)\n\n def get_cursor(self,event):\n cursor_row=self.Note_Table.focus()\n contents=self.Note_Table.item(cursor_row)\n row=contents['values']\n self.title_var.set(row[1])\n self.txt_note.delete('1.0',END)\n self.txt_note.insert(END,row[2])\n \n def update(self):\n con=sqlite3.connect('notes.db')\n cur=con.cursor()\n sqlite_query='''update note set title=?,noted=? 
where id=?'''\n cursor_row=self.Note_Table.focus()\n contents=self.Note_Table.item(cursor_row)\n row=contents['values']\n datas=(self.title_var.get(),self.txt_note.get('1.0',END),row[0])\n cur.execute(sqlite_query,datas)\n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n mb.showinfo('Success','Note has been updated')\n\n def delete(self):\n con=sqlite3.connect('notes.db')\n cur=con.cursor()\n cursor_row=self.Note_Table.focus()\n contents=self.Note_Table.item(cursor_row)\n row=contents['values']\n cur.execute('''delete from note where id=%d''' % (row[0]))\n con.commit()\n con.close()\n self.fetch_data()\n self.clear()\n mb.showwarning('Delete','Note deleted')\n\n def search_data(self):\n if self.search_txt.get()=='':\n mb.showerror('Error','Search text not found')\n else:\n con=sqlite3.connect('notes.db')\n cur=con.cursor()\n cur.execute(\"select * from note where title like ?\", ('%'+self.search_txt.get()+'%',))\n rows=cur.fetchall()\n if len(rows)!=0:\n self.Note_Table.delete(*self.Note_Table.get_children())\n for row in rows:\n self.Note_Table.insert('',END,values=row)\n con.commit()\n else:\n self.Note_Table.delete(*self.Note_Table.get_children())\n con.close()\n\n#-----------Loop section------------------\n\nroot=Tk()\nob=Note(root)\nroot.mainloop()\n","sub_path":"Python/notemaker.py","file_name":"notemaker.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"411946713","text":"from traits.api import *\nfrom traitsui.api import *\nfrom saving import BaseSaveHandler\nfrom pyface.api import FileDialog, confirm, error, YES, CANCEL\n#from workspaces import WorkSpace\nfrom project import Project\nfrom experiment import SpectrumExperiment\nfrom measurement import SpectrumMeasurement\n\nclass MainSaveHandler(BaseSaveHandler):\n extension = Str('ws')\n def cfg_autosave(self,info):\n autosave_cfg_view = View(\n VGroup(Item(name='autosave', label='Autosave Enabled', ),\n Item(name='autosaveInterval', label='Autosave Interval',enabled_when='autosave' ),\n ),\n title='Configure Auto-save',\n buttons=['OK'],\n kind='live'\n\n )\n info.object.edit_traits(view=autosave_cfg_view)\n\n def object_autosave_changed(self,info):\n self.autosave = info.object.autosave\n if info.object.autosave:\n\n if info.object.filepath=='':\n\n fileDialog = FileDialog(action='save as', title='Save As',\n wildcard=self.wildcard,\n parent=info.ui.control)\n fileDialog.open()\n if fileDialog.path == '' or fileDialog.return_code == CANCEL:\n info.object.autosave = False\n return False\n else:\n extLen = len(self.extension)\n if extLen and fileDialog.path[-extLen-1:] != '.' + self.extension:\n fileDialog.path += '.' + self.extension\n self.saveObject.filepath = fileDialog.path\n info.object.status = 'Autosave Enabled. Will save every %d seconds to: %s' \\\n % (info.object.autosaveInterval, info.object.filepath)\n else:\n info.object.status = 'Autosave Disabled.'\n\n def comp_integration_tool(self,info):\n info.object.selected.comparison_integration_tool()\n\n def exp_integration_tool(self,info):\n info.object.selected.experiment_integration_tool()\n\n def comp_tool(self,info):\n info.object.selected.comparison_tool()\n\n def plot_tool(self, info):\n info.object.selected.plotting_tool()\n\n def fit_tool(self, info):\n info.object.selected.fitting_tool()\n\n #def new_workspace(self,info):\n #return WorkSpace(main=info.object)\n\n def new_project(self, info, object):\n new = Project(main=info.object)\n info.object.selected.projects.append(new)\n\n def new_experiment(self,info, object):\n new = SpectrumExperiment(main=info.object)\n info.object.selected.experiments.append(new)\n\n\n def new_measurement(self,info, object):\n new = SpectrumMeasurement(main=info.object)\n info.object.selected.measurements.append(new)\n\n def object_autosaveInterval_changed(self,info):\n self.autosaveInterval = info.object.autosaveInterval\n info.object.status = 'Autosave Enabled. Will save every %d seconds to: %s' \\\n % (info.object.autosaveInterval, info.object.filepath)\n\n\n\nclass CrystalHandler(BaseSaveHandler):\n extension = Str('crystal')\n\n\nclass MeasurementHandler(BaseSaveHandler):\n extension = Str('meas')\n\n def add_data(self,info):\n fileDialog = FileDialog(action='save as', title='Save As',\n wildcard=self.wildcard,\n parent=info.ui.control)\n\n fileDialog.open()\n if fileDialog.path == '' or fileDialog.return_code == CANCEL:\n return False\n else:\n info.object.load_from_file(fileDialog.path)\n\n","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"181621496","text":"import numpy as np\nimport matplotlib .pyplot as plt\nimport os\nimport cv2\nimport pandas as pd\nfrom tqdm import tqdm\nimport json\n\n# We read the csv Data in using pandas(this is the fastest way I could think to do this)\ndf = pd.read_csv(\"./humpback-whale-identification/train.csv\")\n\n# We convert the pandas datafram to a numpy array\nnparry = df.to_numpy()\nprint(nparry.shape)\n\n# We then seperate the whale image IDs and the labels into two seperate arrays\nwhale_image_IDs = nparry[:,0]\nlabels = nparry[:,1]\nprint(labels)\nprint(labels.shape)\n\n# Convert labels to numbers and create a legend to save that matches up the number to the label\nlegend = {}\nyhat = np.zeros(labels.shape[0])\nindex = 0\nnew_assignment = 0\nfor x in tqdm(labels):\n\tif x in legend.keys():\n\t\tyhat[index] = legend[x]\n\telse:\n\t\tlegend[x] = new_assignment\n\t\tyhat[index] = new_assignment\n\t\tnew_assignment += 1\n\tindex += 1\n\n\nprint(yhat)\nprint(yhat.shape)\nprint(labels.shape)\n\nnp.save('y', yhat)\nwith open(\"legend.json\", \"w\") as js:\n\tjson.dump(legend, js)\n\ny_load = np.load(\"y.npy\")\nprint(y_load.shape)\n\n# For the time being we will convert the image to grey scale, change width and height as needed to test different image sizes\n\nWIDTH = 28\nHEIGHT = 28\nDATADIR = \"./humpback-whale-identification\"\ncategory = \"train\"\nX = np.zeros(shape=(25361, WIDTH, HEIGHT))\n\npath = os.path.join(DATADIR, category)\nindex = 0\nfor img in tqdm(os.listdir(path)):\n\ttry:\n\t\timg_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)\n\t\tnew_array = cv2.resize(img_array, (WIDTH, HEIGHT))\n\t\tX[index] = new_array\n\t\t#plt.imshow(new_array, cmap=\"gray\")\n\t\t#plt.show()\n\t\tindex += 1\n\texcept IndexError:\n\t\tprint(\"We couldn't find that image\")\n#print(X)\nnp.save('X', X)\n\nX = np.load(\"X.npy\")\nprint(X.shape)\n\n\n#If everything worked correctly, we should have two arrays, one of the images, one of the lables\n","sub_path":"Read_and_Clean_Data.py","file_name":"Read_and_Clean_Data.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"36142749","text":"import numpy as np\nimport pandas as pd\nimport requests\nimport json\n\n\ndf = pd.read_csv('./workfile.csv')\nfor z in df.sub_name.values:\n\n\n\tdf_sub = pd.read_csv('./data/{}_final.csv'.format(z))\n\n\tdf_sub.drop_duplicates('author',inplace=True)\n\n\tauth_lst=df_sub.author.values\n\t#print(auth_lst)\n\tsub_lst = []\n\tfor i in auth_lst:\n\t\t#print(i)\n\t\ttry:\n\t\t\tjson_link = 'https://www.reddit.com/user/{}/.json'.format(i)\n\t\t\tres = requests.get(json_link, headers={'User-agent': 'Capstone Bot 0.1'})\n\t\t\tdata = res.json()\n\t\t\n\t\t\tdf=pd.DataFrame(data)\n\t\t\t#print('first')\n\t\t\ttry:\n\t\t\t\t#print('second')\n\t\t\t\tdf=pd.DataFrame(df.T.children[1])\n\t\t\texcept:\n \t\t\tcontinue\n\t\t\tfor i in range(24):\n\t\t\t\tif len(df.data[i])==94:\n\t\t\t\t\t#print(df.data[i]['subreddit'])\n\t\t\t\t\tsub_lst.append(df.data[i]['subreddit'])\n\t\t\tprint(sub_lst[-1])\n\t\texcept:\n\t\t\tcontinue\n\n\t\tdf = pd.DataFrame({'subreddits':sub_lst})\n\n\n\tdf.to_csv('./data/{}_posters_lst.csv'.format(z),index=False)\n\n","sub_path":"scripts2/user_profile_scraper.py","file_name":"user_profile_scraper.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"421391151","text":"class ModelMixin:\n def to_dict(self):\n '''数据转字典'''\n data = []\n for field in self._meta.fields:\n name = field.attname\n # value = self.__dict__[name]\n value = getattr(self, name)\n data[name] = value\n return data\n","sub_path":"lib/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"209166383","text":"# This script takes two arguments: a file path and a face count\n# Face count: HighFaceCount, MediumFaceCount, or LowFaceCount\n\nimport sys\nimport Metashape\n\n\ndef print_progress(p):\n print('Task progress: {:.2f}%'.format(p))\n\n\ndef build_mesh():\n\n Metashape.License().activate(\"TXC3V-LUVCT-E1BLK-U83UR-GP25H\")\n\n project = \"./\" + sys.argv[1] + \".psx\"\n input = sys.argv[2]\n faces = sys.argv[3]\n output = \"./\" + sys.argv[1] + \"model.fbx\"\n\n doc = Metashape.Document()\n doc.save(path=project)\n chunk = doc.addChunk()\n\n try:\n chunk.importPoints(\n path=input,\n format=Metashape.PointsFormatNone,\n calculate_normals=True,\n progress=print_progress\n # crs=\"geographic\"\n )\n except RuntimeError:\n print(\"Can't import points\")\n\n # reset the region around the point cloud\n chunk.resetRegion()\n\n try:\n # build model using point cloud\n chunk.buildModel(\n surface_type=Metashape.Arbitrary,\n interpolation=Metashape.EnabledInterpolation,\n face_count_custom=faces,\n source_data=Metashape.DenseCloudData,\n vertex_colors=True,\n vertex_confidence=True,\n progress=print_progress\n )\n except RuntimeError:\n print(\"Can't build model\")\n\n try:\n # build texture using vertex colors\n # chunk.buildTexture(\n # blending_mode=Metashape.MosaicBlending,\n # texture_size=4096,\n # fill_holes=True,\n # ghosting_filter=True,\n # texture_type=Metashape.Model.DiffuseMap,\n # progress=print_progress\n # )\n chunk.buildTexture(\n texture_size=4096,\n blending_mode=Metashape.MosaicBlending,\n source_model=chunk.model.key,\n progress=print_progress\n )\n except RuntimeError:\n print(\"Can't build texture\")\n\n # Metashape.Model.addTexture(type=Metashape.Model.DiffuseMap)\n\n try:\n # export model as FBX\n chunk.exportModel(\n path=output,\n binary=True,\n precision=6,\n texture_format=Metashape.ImageFormatJPEG,\n save_texture=True, save_uv=True,\n save_normals=True, save_colors=True,\n save_cameras=True, save_markers=True,\n save_udim=False, save_alpha=False,\n strip_extensions=False,\n raster_transform=Metashape.RasterTransformNone,\n colors_rgb_8bit=True,\n format=Metashape.ModelFormatFBX,\n progress=print_progress\n )\n except RuntimeError:\n print(\"Can't export model\")\n\n\nif __name__ == \"__main__\":\n build_mesh()\n","sub_path":"metashape_script.py","file_name":"metashape_script.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"215955348","text":"def applyMove(state, move):\n new=list(state)\n if move=='PUSH':\n for i in range(len(state)):\n if state[i]==0 and i!=0:\n new[i-1]=state[i]\n new[i]=state[i-1]\n elif move=='PULL':\n for i in range(len(state)):\n if state[i]==0 and i1:\n new[i-2]=state[i-1]\n new[i-1]=state[i-2]\n elif move=='FLIP':\n count=1;\n for i in range(len(state)):\n if state[i]==0 and i h(f['state'])+temp(f['path']) :\n ans = h(f['state'])\n matrix = f\n frontier.remove(matrix)\n return matrix\n \ndef temp (frontier):\n ans=0\n for i in frontier:\n if i=='PUSH':\n ans=ans+10\n elif i=='PULL':\n ans=ans+5\n elif i=='SWAP':\n ans=ans+17\n elif i=='FLIP':\n ans=ans+8\n return ans","sub_path":"assignment_2/Assignment 2, programming part 2_ A_ Search/sortballs.py","file_name":"sortballs.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"259560463","text":"from doctest import TestResults\nfrom gc import freeze\nfrom locale import normalize\nimport os\nimport torch\nfrom tqdm import tqdm\nfrom torchvision.utils import save_image\nimport PIL\nimport numpy as np\nimport json\nimport sys\n# sys.path.append('/data2/dxw/code/github/iccv2023/jojogan_encoder/')\nsys.path.append('/data2/wyf/code/jojogan-3d')\nfrom configs import global_config, paths_config, hyperparameters\nfrom training.coaches.base_coach import BaseCoach\nfrom utils.log_utils import log_images_from_w\nimport mrcfile\nfrom criteria import l2_loss\nfrom pytorch_msssim import ms_ssim\nfrom gen_videos import gen_interp_video\n\nclass SingleIDCoach(BaseCoach):\n def __init__(self, data_loader, use_wandb):\n super().__init__(data_loader, use_wandb)\n\n def train(self, cam_encoder, e4e_encoder):\n # 读取字典超参数output_dir,设置图像保存文件夹output_dir\n args_file = open(paths_config.args_path, 'r')\n args_dic = eval(args_file.read())\n # output_dir = paths_config.output_data_path\n output_dir = args_dic['output_dir'] # 读取args.txt里的命令行设置\n if os.path.isdir(output_dir) == 0:\n os.mkdir(output_dir)\n if os.path.isdir(output_dir + '_pivot') == 0:\n os.mkdir(output_dir + '_pivot')\n use_ball_holder = True\n iters=0\n for fname, image in tqdm(self.data_loader): # for each face samples\n print('11111111111111111111')\n print(image.shape)\n iters+=1\n image_name = fname[0] \n self.restart_training()\n if self.image_counter >= hyperparameters.max_images_to_invert: # 这里可能是指定生成多少张新的视图\n break\n ckpt_dir = os.path.join(paths_config.embedding_dir, output_dir[2:])\n if hyperparameters.use_last_w_pivots:\n w_pivot, freezed_cam = self.load_inversions(ckpt_dir, image_name)\n else:\n w_pivot, freezed_cam = self.calc_inversions(image, image_name, cam_encoder, e4e_encoder, output_dir) \n freezed_cam.requires_grad=False\n w_pivot = w_pivot.to(global_config.device)\n log_images_counter = 0\n real_images_batch = image.to(global_config.device) \n with torch.no_grad():\n # 这里是保存训练前的结果\n generated_image_grid = self.forward(w_pivot, needs_img_grid='small', grid_num=1, need_gt_ingrid=(real_images_batch, freezed_cam))\n intimg = (generated_image_grid.permute(1,2,0) * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n PIL.Image.fromarray(intimg.cpu().numpy(), 'RGB').save(output_dir + f'_pivot/{image_name}.png')\n if global_config.gen_video:\n gen_interp_video(self.G, w_pivot, output_dir + f'_pivot/{image_name}_pivot.mp4')\n w_clone_old = w_pivot.clone().detach() # 克隆一份训练前的w_pivot_old\n np.save(args_dic['output_dir'] + f'/{image_name}_w_old.npy',w_clone_old.cpu().numpy()) # 保存训练前的w_pivot_old\n for i in tqdm(range(hyperparameters.max_pti_steps)): # 训练151次\n # 这里应该就是生成新的视角了,确实是产生新视角的图片\n # 这里就是训练的核心\n generated_images = self.forward(w_pivot, freezed_cam)\n loss, l2_loss_val, loss_lpips = self.calc_loss(generated_images, real_images_batch, image_name,self.G, use_ball_holder, w_pivot)\n self.optimizer.zero_grad()\n if loss_lpips <= hyperparameters.LPIPS_value_threshold:\n break\n loss.backward()\n self.optimizer.step()\n use_ball_holder = global_config.training_step % hyperparameters.locality_regularization_interval == 0\n global_config.training_step += 1\n log_images_counter += 1\n self.image_counter += 1\n with torch.no_grad():\n # 这里是训练后的结果\n generated_image_grid = self.forward(w_pivot, needs_img_grid='small', grid_num=1, need_gt_ingrid=(real_images_batch, freezed_cam))\n print(f'generated_image_grid.shape={generated_image_grid.shape}')\n intimg = (generated_image_grid.permute(1,2,0) * 127.5 + 128).clamp(0, 
255).to(torch.uint8)\n intimg_inv = intimg[516:1032,:,:] # 截取GAN inversion后的图像\n print(f'intimg1.shape={intimg_inv.shape}')\n PIL.Image.fromarray(intimg.cpu().numpy(), 'RGB').save(output_dir + f'/{image_name}.png')\n PIL.Image.fromarray(intimg_inv.cpu().numpy(), 'RGB').save(output_dir + f'/{image_name}_inv.png')\n if global_config.gen_video:\n gen_interp_video(self.G, w_pivot, output_dir + f'/{image_name}.mp4')\n w_clone_new = w_pivot.clone().detach() # 克隆一份训练后的w_pivot_new\n np.save(args_dic['output_dir'] + f'/{image_name}_w_new.npy',w_clone_new.cpu().numpy()) # 保存训练后的w_pivot_new\n if global_config.do_evaluation:\n # 这里是用于保存结果的,训练的时候不运行这里。\n with torch.no_grad():\n # save reconstruction\n synimg = self.G.synthesis(w_pivot[:, :14, :], freezed_cam[:, :25], noise_mode='const', force_fp32=True)['image']\n synimg = (synimg+1) / 2\n image = (image.cuda() + 1) / 2\n m_mse = l2_loss.l2_loss(synimg, image).item()\n m_lpips = self.lpips_loss(synimg, image).item()\n m_msssim = ms_ssim(synimg, image, data_range=1, size_average=False ).item()\n synimg = synimg*2 - 1\n image = image*2 - 1\n m_identity = self.id_loss(synimg, image).item() \n # save metrics to txt:\n with open(os.path.join(output_dir, f\"{image_name}metrics.txt\"), \"w\") as f:\n f.write(\"mse: {}\\n\".format(m_mse))\n f.write(\"lpips: {}\\n\".format(m_lpips))\n f.write(\"msssim: {}\\n\".format(m_msssim))\n f.write(\"identity: {}\\n\".format(m_identity)) \n # save mesh \n if global_config.gen_mesh:\n create_geometry(self.G, w_pivot, outdir = output_dir, fname = str(image_name)+\"_pti\")\n #save pivots\n if global_config.save_pivot:\n # 本质都是list之类的东西\n cam_np = freezed_cam.clone().detach().cpu().numpy()\n w_np = torch.from_numpy(w_pivot.clone().detach().cpu())\n np.save(os.path.join(ckpt_dir, f'{image_name}_cam.npy'), cam_np)\n np.save(os.path.join(ckpt_dir, f'{image_name}_ws.npy'), w_np)\n\ndef create_geometry(G, ws, outdir, fname, shape_res = 512, shape_format = '.mrc'):\n # extract a shape.mrc with marching cubes. 
You can view the .mrc file using ChimeraX from UCSF.\n max_batch=1000000\n device = global_config.device\n samples, voxel_origin, voxel_size = create_samples(N=shape_res, voxel_origin=[0, 0, 0], cube_length=G.rendering_kwargs['box_warp'] * 1)#.reshape(1, -1, 3)\n samples = samples.cuda()\n sigmas = torch.zeros((samples.shape[0], samples.shape[1], 1), device=device)\n transformed_ray_directions_expanded = torch.zeros((samples.shape[0], max_batch, 3), device=device)\n transformed_ray_directions_expanded[..., -1] = -1\n head = 0\n with tqdm(total = samples.shape[1]) as pbar:\n with torch.no_grad():\n while head < samples.shape[1]:\n torch.manual_seed(0)\n coordinates = samples[:, head:head+max_batch]\n directions = transformed_ray_directions_expanded[:, :samples.shape[1]-head]\n planes = G.backbone.synthesis(ws, update_emas = False, noise_mode='const')\n planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])\n sigma = G.renderer.run_model(planes, G.decoder, coordinates, directions, G.rendering_kwargs)['sigma'] \n sigmas[:, head:head+max_batch] = sigma\n head += max_batch\n pbar.update(max_batch)\n sigmas = sigmas.reshape((shape_res, shape_res, shape_res)).cpu().numpy()\n sigmas = np.flip(sigmas, 0)\n # Trim the border of the extracted cube\n pad = int(30 * shape_res / 256)\n pad_value = -1000\n sigmas[:pad] = pad_value\n sigmas[-pad:] = pad_value\n sigmas[:, :pad] = pad_value\n sigmas[:, -pad:] = pad_value\n sigmas[:, :, :pad] = pad_value\n sigmas[:, :, -pad:] = pad_value\n if shape_format == '.ply':\n from shape_utils import convert_sdf_samples_to_ply\n convert_sdf_samples_to_ply(np.transpose(sigmas, (2, 1, 0)), [0, 0, 0], 1, os.path.join(outdir, str(fname) + '.ply'), level=10)\n elif shape_format == '.mrc': # output mrc\n with mrcfile.new_mmap(os.path.join(outdir, str(fname) + '.mrc'), overwrite=True, shape=sigmas.shape, mrc_mode=2) as mrc:\n mrc.data[:] = sigmas\n \ndef create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0):\n # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle\n voxel_origin = np.array(voxel_origin) - cube_length/2\n voxel_size = cube_length / (N - 1)\n overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor())\n samples = torch.zeros(N ** 3, 3)\n # transform first 3 columns\n # to be the x, y, z index\n samples[:, 2] = overall_index % N\n samples[:, 1] = (overall_index.float() / N) % N\n samples[:, 0] = ((overall_index.float() / N) / N) % N\n # transform first 3 columns\n # to be the x, y, z coordinate\n samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]\n samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]\n samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]\n num_samples = N ** 3\n return samples.unsqueeze(0), voxel_origin, voxel_size ","sub_path":"ITPortrait/jojoGAN3D_singleid.py","file_name":"jojoGAN3D_singleid.py","file_ext":"py","file_size_in_byte":10263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
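create_samples above flattens an N×N×N grid with the z index varying fastest and shifts it so voxel_origin lands at the cube's (bottom, left, down) corner. A quick sanity check of that mapping, assuming create_samples is importable from the module above:

```python
import torch

samples, voxel_origin, voxel_size = create_samples(N=4, cube_length=2.0)
assert samples.shape == (1, 4 ** 3, 3)
# sample 0 sits exactly at the cube corner, i.e. at voxel_origin
assert torch.allclose(samples[0, 0], torch.tensor(voxel_origin, dtype=torch.float32))
# consecutive samples step along z by one voxel
assert torch.isclose(samples[0, 1, 2] - samples[0, 0, 2],
                     torch.tensor(voxel_size, dtype=torch.float32))
```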
+{"seq_id":"600016736","text":"\"\"\"\nType annotations for fsx service client.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html)\n\nUsage::\n\n ```python\n import boto3\n from mypy_boto3_fsx import FSxClient\n\n client: FSxClient = boto3.client(\"fsx\")\n ```\n\"\"\"\nimport sys\nfrom typing import Any, Dict, List, Type, overload\n\nfrom botocore.client import BaseClient, ClientMeta\n\nfrom .literals import (\n DataRepositoryTaskTypeType,\n FileSystemTypeType,\n RestoreOpenZFSVolumeOptionType,\n StorageTypeType,\n StorageVirtualMachineRootVolumeSecurityStyleType,\n VolumeTypeType,\n)\nfrom .paginator import (\n DescribeBackupsPaginator,\n DescribeFileSystemsPaginator,\n DescribeStorageVirtualMachinesPaginator,\n DescribeVolumesPaginator,\n ListTagsForResourcePaginator,\n)\nfrom .type_defs import (\n AssociateFileSystemAliasesResponseTypeDef,\n CancelDataRepositoryTaskResponseTypeDef,\n CompletionReportTypeDef,\n CopyBackupResponseTypeDef,\n CreateBackupResponseTypeDef,\n CreateDataRepositoryAssociationResponseTypeDef,\n CreateDataRepositoryTaskResponseTypeDef,\n CreateFileCacheLustreConfigurationTypeDef,\n CreateFileCacheResponseTypeDef,\n CreateFileSystemFromBackupResponseTypeDef,\n CreateFileSystemLustreConfigurationTypeDef,\n CreateFileSystemOntapConfigurationTypeDef,\n CreateFileSystemOpenZFSConfigurationTypeDef,\n CreateFileSystemResponseTypeDef,\n CreateFileSystemWindowsConfigurationTypeDef,\n CreateOntapVolumeConfigurationTypeDef,\n CreateOpenZFSVolumeConfigurationTypeDef,\n CreateSnapshotResponseTypeDef,\n CreateStorageVirtualMachineResponseTypeDef,\n CreateSvmActiveDirectoryConfigurationTypeDef,\n CreateVolumeFromBackupResponseTypeDef,\n CreateVolumeResponseTypeDef,\n DataRepositoryTaskFilterTypeDef,\n DeleteBackupResponseTypeDef,\n DeleteDataRepositoryAssociationResponseTypeDef,\n DeleteFileCacheResponseTypeDef,\n DeleteFileSystemLustreConfigurationTypeDef,\n DeleteFileSystemOpenZFSConfigurationTypeDef,\n DeleteFileSystemResponseTypeDef,\n DeleteFileSystemWindowsConfigurationTypeDef,\n DeleteSnapshotResponseTypeDef,\n DeleteStorageVirtualMachineResponseTypeDef,\n DeleteVolumeOntapConfigurationTypeDef,\n DeleteVolumeOpenZFSConfigurationTypeDef,\n DeleteVolumeResponseTypeDef,\n DescribeBackupsResponseTypeDef,\n DescribeDataRepositoryAssociationsResponseTypeDef,\n DescribeDataRepositoryTasksResponseTypeDef,\n DescribeFileCachesResponseTypeDef,\n DescribeFileSystemAliasesResponseTypeDef,\n DescribeFileSystemsResponseTypeDef,\n DescribeSnapshotsResponseTypeDef,\n DescribeStorageVirtualMachinesResponseTypeDef,\n DescribeVolumesResponseTypeDef,\n DisassociateFileSystemAliasesResponseTypeDef,\n FileCacheDataRepositoryAssociationTypeDef,\n FilterTypeDef,\n ListTagsForResourceResponseTypeDef,\n ReleaseFileSystemNfsV3LocksResponseTypeDef,\n RestoreVolumeFromSnapshotResponseTypeDef,\n S3DataRepositoryConfigurationTypeDef,\n SnapshotFilterTypeDef,\n StorageVirtualMachineFilterTypeDef,\n TagTypeDef,\n UpdateDataRepositoryAssociationResponseTypeDef,\n UpdateFileCacheLustreConfigurationTypeDef,\n UpdateFileCacheResponseTypeDef,\n UpdateFileSystemLustreConfigurationTypeDef,\n UpdateFileSystemOntapConfigurationTypeDef,\n UpdateFileSystemOpenZFSConfigurationTypeDef,\n UpdateFileSystemResponseTypeDef,\n UpdateFileSystemWindowsConfigurationTypeDef,\n UpdateOntapVolumeConfigurationTypeDef,\n UpdateOpenZFSVolumeConfigurationTypeDef,\n UpdateSnapshotResponseTypeDef,\n UpdateStorageVirtualMachineResponseTypeDef,\n UpdateSvmActiveDirectoryConfigurationTypeDef,\n 
UpdateVolumeResponseTypeDef,\n VolumeFilterTypeDef,\n)\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal\n\n__all__ = (\"FSxClient\",)\n\nclass BotocoreClientError(BaseException):\n MSG_TEMPLATE: str\n\n def __init__(self, error_response: Dict[str, Any], operation_name: str) -> None:\n self.response: Dict[str, Any]\n self.operation_name: str\n\nclass Exceptions:\n ActiveDirectoryError: Type[BotocoreClientError]\n BackupBeingCopied: Type[BotocoreClientError]\n BackupInProgress: Type[BotocoreClientError]\n BackupNotFound: Type[BotocoreClientError]\n BackupRestoring: Type[BotocoreClientError]\n BadRequest: Type[BotocoreClientError]\n ClientError: Type[BotocoreClientError]\n DataRepositoryAssociationNotFound: Type[BotocoreClientError]\n DataRepositoryTaskEnded: Type[BotocoreClientError]\n DataRepositoryTaskExecuting: Type[BotocoreClientError]\n DataRepositoryTaskNotFound: Type[BotocoreClientError]\n FileCacheNotFound: Type[BotocoreClientError]\n FileSystemNotFound: Type[BotocoreClientError]\n IncompatibleParameterError: Type[BotocoreClientError]\n IncompatibleRegionForMultiAZ: Type[BotocoreClientError]\n InternalServerError: Type[BotocoreClientError]\n InvalidDataRepositoryType: Type[BotocoreClientError]\n InvalidDestinationKmsKey: Type[BotocoreClientError]\n InvalidExportPath: Type[BotocoreClientError]\n InvalidImportPath: Type[BotocoreClientError]\n InvalidNetworkSettings: Type[BotocoreClientError]\n InvalidPerUnitStorageThroughput: Type[BotocoreClientError]\n InvalidRegion: Type[BotocoreClientError]\n InvalidSourceKmsKey: Type[BotocoreClientError]\n MissingFileCacheConfiguration: Type[BotocoreClientError]\n MissingFileSystemConfiguration: Type[BotocoreClientError]\n MissingVolumeConfiguration: Type[BotocoreClientError]\n NotServiceResourceError: Type[BotocoreClientError]\n ResourceDoesNotSupportTagging: Type[BotocoreClientError]\n ResourceNotFound: Type[BotocoreClientError]\n ServiceLimitExceeded: Type[BotocoreClientError]\n SnapshotNotFound: Type[BotocoreClientError]\n SourceBackupUnavailable: Type[BotocoreClientError]\n StorageVirtualMachineNotFound: Type[BotocoreClientError]\n UnsupportedOperation: Type[BotocoreClientError]\n VolumeNotFound: Type[BotocoreClientError]\n\nclass FSxClient(BaseClient):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html)\n \"\"\"\n\n meta: ClientMeta\n\n @property\n def exceptions(self) -> Exceptions:\n \"\"\"\n FSxClient exceptions.\n \"\"\"\n def associate_file_system_aliases(\n self, *, FileSystemId: str, Aliases: List[str], ClientRequestToken: str = None\n ) -> AssociateFileSystemAliasesResponseTypeDef:\n \"\"\"\n Use this action to associate one or more Domain Name Server (DNS) aliases with\n an existing Amazon FSx for Windows File Server file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.associate_file_system_aliases)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#associate_file_system_aliases)\n \"\"\"\n def can_paginate(self, operation_name: str) -> bool:\n \"\"\"\n Check if an operation can be paginated.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.can_paginate)\n [Show boto3-stubs 
documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#can_paginate)\n \"\"\"\n def cancel_data_repository_task(\n self, *, TaskId: str\n ) -> CancelDataRepositoryTaskResponseTypeDef:\n \"\"\"\n Cancels an existing Amazon FSx for Lustre data repository task if that task is\n in either the `PENDING` or `EXECUTING` state.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.cancel_data_repository_task)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#cancel_data_repository_task)\n \"\"\"\n def close(self) -> None:\n \"\"\"\n Closes underlying endpoint connections.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.close)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#close)\n \"\"\"\n def copy_backup(\n self,\n *,\n SourceBackupId: str,\n ClientRequestToken: str = None,\n SourceRegion: str = None,\n KmsKeyId: str = None,\n CopyTags: bool = None,\n Tags: List[\"TagTypeDef\"] = None\n ) -> CopyBackupResponseTypeDef:\n \"\"\"\n Copies an existing backup within the same Amazon Web Services account to another\n Amazon Web Services Region (cross-Region copy) or within the same Amazon Web\n Services Region (in-Region copy).\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.copy_backup)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#copy_backup)\n \"\"\"\n def create_backup(\n self,\n *,\n FileSystemId: str = None,\n ClientRequestToken: str = None,\n Tags: List[\"TagTypeDef\"] = None,\n VolumeId: str = None\n ) -> CreateBackupResponseTypeDef:\n \"\"\"\n Creates a backup of an existing Amazon FSx for Windows File Server file system,\n Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon\n FSx for OpenZFS file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_backup)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_backup)\n \"\"\"\n def create_data_repository_association(\n self,\n *,\n FileSystemId: str,\n DataRepositoryPath: str,\n FileSystemPath: str = None,\n BatchImportMetaDataOnCreate: bool = None,\n ImportedFileChunkSize: int = None,\n S3: \"S3DataRepositoryConfigurationTypeDef\" = None,\n ClientRequestToken: str = None,\n Tags: List[\"TagTypeDef\"] = None\n ) -> CreateDataRepositoryAssociationResponseTypeDef:\n \"\"\"\n Creates an Amazon FSx for Lustre data repository association (DRA).\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_data_repository_association)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_data_repository_association)\n \"\"\"\n def create_data_repository_task(\n self,\n *,\n Type: DataRepositoryTaskTypeType,\n FileSystemId: str,\n Report: \"CompletionReportTypeDef\",\n Paths: List[str] = None,\n ClientRequestToken: str = None,\n Tags: List[\"TagTypeDef\"] = None,\n CapacityToRelease: int = None\n ) -> CreateDataRepositoryTaskResponseTypeDef:\n \"\"\"\n Creates an Amazon FSx for Lustre data repository task.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_data_repository_task)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_data_repository_task)\n \"\"\"\n def create_file_cache(\n self,\n *,\n FileCacheType: Literal[\"LUSTRE\"],\n FileCacheTypeVersion: str,\n StorageCapacity: int,\n SubnetIds: List[str],\n ClientRequestToken: str = None,\n SecurityGroupIds: List[str] = None,\n Tags: List[\"TagTypeDef\"] = None,\n CopyTagsToDataRepositoryAssociations: bool = None,\n KmsKeyId: str = None,\n LustreConfiguration: \"CreateFileCacheLustreConfigurationTypeDef\" = None,\n DataRepositoryAssociations: List[\"FileCacheDataRepositoryAssociationTypeDef\"] = None\n ) -> CreateFileCacheResponseTypeDef:\n \"\"\"\n Creates a new Amazon File Cache resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_file_cache)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_file_cache)\n \"\"\"\n def create_file_system(\n self,\n *,\n FileSystemType: FileSystemTypeType,\n StorageCapacity: int,\n SubnetIds: List[str],\n ClientRequestToken: str = None,\n StorageType: StorageTypeType = None,\n SecurityGroupIds: List[str] = None,\n Tags: List[\"TagTypeDef\"] = None,\n KmsKeyId: str = None,\n WindowsConfiguration: \"CreateFileSystemWindowsConfigurationTypeDef\" = None,\n LustreConfiguration: \"CreateFileSystemLustreConfigurationTypeDef\" = None,\n OntapConfiguration: \"CreateFileSystemOntapConfigurationTypeDef\" = None,\n FileSystemTypeVersion: str = None,\n OpenZFSConfiguration: \"CreateFileSystemOpenZFSConfigurationTypeDef\" = None\n ) -> CreateFileSystemResponseTypeDef:\n \"\"\"\n Creates a new, empty Amazon FSx file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_file_system)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_file_system)\n \"\"\"\n def create_file_system_from_backup(\n self,\n *,\n BackupId: str,\n SubnetIds: List[str],\n ClientRequestToken: str = None,\n SecurityGroupIds: List[str] = None,\n Tags: List[\"TagTypeDef\"] = None,\n WindowsConfiguration: \"CreateFileSystemWindowsConfigurationTypeDef\" = None,\n LustreConfiguration: \"CreateFileSystemLustreConfigurationTypeDef\" = None,\n StorageType: StorageTypeType = None,\n KmsKeyId: str = None,\n FileSystemTypeVersion: str = None,\n OpenZFSConfiguration: \"CreateFileSystemOpenZFSConfigurationTypeDef\" = None,\n StorageCapacity: int = None\n ) -> CreateFileSystemFromBackupResponseTypeDef:\n \"\"\"\n Creates a new Amazon FSx for Lustre, Amazon FSx for Windows File Server, or\n Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_file_system_from_backup)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_file_system_from_backup)\n \"\"\"\n def create_snapshot(\n self,\n *,\n Name: str,\n VolumeId: str,\n ClientRequestToken: str = None,\n Tags: List[\"TagTypeDef\"] = None\n ) -> CreateSnapshotResponseTypeDef:\n \"\"\"\n Creates a snapshot of an existing Amazon FSx for OpenZFS volume.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_snapshot)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_snapshot)\n \"\"\"\n def create_storage_virtual_machine(\n self,\n *,\n FileSystemId: str,\n Name: str,\n ActiveDirectoryConfiguration: \"CreateSvmActiveDirectoryConfigurationTypeDef\" = None,\n ClientRequestToken: str = None,\n SvmAdminPassword: str = None,\n Tags: List[\"TagTypeDef\"] = None,\n RootVolumeSecurityStyle: StorageVirtualMachineRootVolumeSecurityStyleType = None\n ) -> CreateStorageVirtualMachineResponseTypeDef:\n \"\"\"\n Creates a storage virtual machine (SVM) for an Amazon FSx for ONTAP file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_storage_virtual_machine)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_storage_virtual_machine)\n \"\"\"\n def create_volume(\n self,\n *,\n VolumeType: VolumeTypeType,\n Name: str,\n ClientRequestToken: str = None,\n OntapConfiguration: \"CreateOntapVolumeConfigurationTypeDef\" = None,\n Tags: List[\"TagTypeDef\"] = None,\n OpenZFSConfiguration: \"CreateOpenZFSVolumeConfigurationTypeDef\" = None\n ) -> CreateVolumeResponseTypeDef:\n \"\"\"\n Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_volume)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_volume)\n \"\"\"\n def create_volume_from_backup(\n self,\n *,\n BackupId: str,\n Name: str,\n ClientRequestToken: str = None,\n OntapConfiguration: \"CreateOntapVolumeConfigurationTypeDef\" = None,\n Tags: List[\"TagTypeDef\"] = None\n ) -> CreateVolumeFromBackupResponseTypeDef:\n \"\"\"\n Creates a new Amazon FSx for NetApp ONTAP volume from an existing Amazon FSx\n volume backup.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.create_volume_from_backup)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#create_volume_from_backup)\n \"\"\"\n def delete_backup(\n self, *, BackupId: str, ClientRequestToken: str = None\n ) -> DeleteBackupResponseTypeDef:\n \"\"\"\n Deletes an Amazon FSx backup.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.delete_backup)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#delete_backup)\n \"\"\"\n def delete_data_repository_association(\n self,\n *,\n AssociationId: str,\n ClientRequestToken: str = None,\n DeleteDataInFileSystem: bool = None\n ) -> DeleteDataRepositoryAssociationResponseTypeDef:\n \"\"\"\n Deletes a data repository association on an Amazon FSx for Lustre file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.delete_data_repository_association)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#delete_data_repository_association)\n \"\"\"\n def delete_file_cache(\n self, *, FileCacheId: str, ClientRequestToken: str = None\n ) -> 
DeleteFileCacheResponseTypeDef:\n \"\"\"\n Deletes an Amazon File Cache resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.delete_file_cache)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#delete_file_cache)\n \"\"\"\n def delete_file_system(\n self,\n *,\n FileSystemId: str,\n ClientRequestToken: str = None,\n WindowsConfiguration: \"DeleteFileSystemWindowsConfigurationTypeDef\" = None,\n LustreConfiguration: \"DeleteFileSystemLustreConfigurationTypeDef\" = None,\n OpenZFSConfiguration: \"DeleteFileSystemOpenZFSConfigurationTypeDef\" = None\n ) -> DeleteFileSystemResponseTypeDef:\n \"\"\"\n Deletes a file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.delete_file_system)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#delete_file_system)\n \"\"\"\n def delete_snapshot(\n self, *, SnapshotId: str, ClientRequestToken: str = None\n ) -> DeleteSnapshotResponseTypeDef:\n \"\"\"\n Deletes an Amazon FSx for OpenZFS snapshot.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.delete_snapshot)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#delete_snapshot)\n \"\"\"\n def delete_storage_virtual_machine(\n self, *, StorageVirtualMachineId: str, ClientRequestToken: str = None\n ) -> DeleteStorageVirtualMachineResponseTypeDef:\n \"\"\"\n Deletes an existing Amazon FSx for ONTAP storage virtual machine (SVM).\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.delete_storage_virtual_machine)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#delete_storage_virtual_machine)\n \"\"\"\n def delete_volume(\n self,\n *,\n VolumeId: str,\n ClientRequestToken: str = None,\n OntapConfiguration: \"DeleteVolumeOntapConfigurationTypeDef\" = None,\n OpenZFSConfiguration: \"DeleteVolumeOpenZFSConfigurationTypeDef\" = None\n ) -> DeleteVolumeResponseTypeDef:\n \"\"\"\n Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.delete_volume)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#delete_volume)\n \"\"\"\n def describe_backups(\n self,\n *,\n BackupIds: List[str] = None,\n Filters: List[\"FilterTypeDef\"] = None,\n MaxResults: int = None,\n NextToken: str = None\n ) -> DescribeBackupsResponseTypeDef:\n \"\"\"\n Returns the description of a specific Amazon FSx backup, if a `BackupIds` value\n is provided for that backup.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_backups)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_backups)\n \"\"\"\n def describe_data_repository_associations(\n self,\n *,\n AssociationIds: List[str] = None,\n Filters: List[\"FilterTypeDef\"] = None,\n MaxResults: int = None,\n NextToken: str = None\n ) -> DescribeDataRepositoryAssociationsResponseTypeDef:\n \"\"\"\n Returns the description of specific Amazon 
FSx for Lustre or Amazon File Cache\n data repository associations, if one or more `AssociationIds` values are\n provided in the request, or if filters are used in the request.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_data_repository_associations)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_data_repository_associations)\n \"\"\"\n def describe_data_repository_tasks(\n self,\n *,\n TaskIds: List[str] = None,\n Filters: List[\"DataRepositoryTaskFilterTypeDef\"] = None,\n MaxResults: int = None,\n NextToken: str = None\n ) -> DescribeDataRepositoryTasksResponseTypeDef:\n \"\"\"\n Returns the description of specific Amazon FSx for Lustre or Amazon File Cache\n data repository tasks, if one or more `TaskIds` values are provided in the\n request, or if filters are used in the request.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_data_repository_tasks)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_data_repository_tasks)\n \"\"\"\n def describe_file_caches(\n self, *, FileCacheIds: List[str] = None, MaxResults: int = None, NextToken: str = None\n ) -> DescribeFileCachesResponseTypeDef:\n \"\"\"\n Returns the description of a specific Amazon File Cache resource, if a\n `FileCacheIds` value is provided for that cache.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_file_caches)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_file_caches)\n \"\"\"\n def describe_file_system_aliases(\n self,\n *,\n FileSystemId: str,\n ClientRequestToken: str = None,\n MaxResults: int = None,\n NextToken: str = None\n ) -> DescribeFileSystemAliasesResponseTypeDef:\n \"\"\"\n Returns the DNS aliases that are associated with the specified Amazon FSx for\n Windows File Server file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_file_system_aliases)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_file_system_aliases)\n \"\"\"\n def describe_file_systems(\n self, *, FileSystemIds: List[str] = None, MaxResults: int = None, NextToken: str = None\n ) -> DescribeFileSystemsResponseTypeDef:\n \"\"\"\n Returns the description of specific Amazon FSx file systems, if a\n `FileSystemIds` value is provided for that file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_file_systems)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_file_systems)\n \"\"\"\n def describe_snapshots(\n self,\n *,\n SnapshotIds: List[str] = None,\n Filters: List[\"SnapshotFilterTypeDef\"] = None,\n MaxResults: int = None,\n NextToken: str = None\n ) -> DescribeSnapshotsResponseTypeDef:\n \"\"\"\n Returns the description of specific Amazon FSx for OpenZFS snapshots, if a\n `SnapshotIds` value is provided.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_snapshots)\n [Show boto3-stubs 
documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_snapshots)\n \"\"\"\n def describe_storage_virtual_machines(\n self,\n *,\n StorageVirtualMachineIds: List[str] = None,\n Filters: List[\"StorageVirtualMachineFilterTypeDef\"] = None,\n MaxResults: int = None,\n NextToken: str = None\n ) -> DescribeStorageVirtualMachinesResponseTypeDef:\n \"\"\"\n Describes one or more Amazon FSx for NetApp ONTAP storage virtual machines\n (SVMs).\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_storage_virtual_machines)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_storage_virtual_machines)\n \"\"\"\n def describe_volumes(\n self,\n *,\n VolumeIds: List[str] = None,\n Filters: List[\"VolumeFilterTypeDef\"] = None,\n MaxResults: int = None,\n NextToken: str = None\n ) -> DescribeVolumesResponseTypeDef:\n \"\"\"\n Describes one or more Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS\n volumes.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.describe_volumes)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#describe_volumes)\n \"\"\"\n def disassociate_file_system_aliases(\n self, *, FileSystemId: str, Aliases: List[str], ClientRequestToken: str = None\n ) -> DisassociateFileSystemAliasesResponseTypeDef:\n \"\"\"\n Use this action to disassociate, or remove, one or more Domain Name Service\n (DNS) aliases from an Amazon FSx for Windows File Server file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.disassociate_file_system_aliases)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#disassociate_file_system_aliases)\n \"\"\"\n def generate_presigned_url(\n self,\n ClientMethod: str,\n Params: Dict[str, Any] = None,\n ExpiresIn: int = 3600,\n HttpMethod: str = None,\n ) -> str:\n \"\"\"\n Generate a presigned url given a client, its method, and arguments.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.generate_presigned_url)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#generate_presigned_url)\n \"\"\"\n def list_tags_for_resource(\n self, *, ResourceARN: str, MaxResults: int = None, NextToken: str = None\n ) -> ListTagsForResourceResponseTypeDef:\n \"\"\"\n Lists tags for Amazon FSx resources.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.list_tags_for_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#list_tags_for_resource)\n \"\"\"\n def release_file_system_nfs_v3_locks(\n self, *, FileSystemId: str, ClientRequestToken: str = None\n ) -> ReleaseFileSystemNfsV3LocksResponseTypeDef:\n \"\"\"\n Releases the file system lock from an Amazon FSx for OpenZFS file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.release_file_system_nfs_v3_locks)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#release_file_system_nfs_v3_locks)\n \"\"\"\n 
def restore_volume_from_snapshot(\n self,\n *,\n VolumeId: str,\n SnapshotId: str,\n ClientRequestToken: str = None,\n Options: List[RestoreOpenZFSVolumeOptionType] = None\n ) -> RestoreVolumeFromSnapshotResponseTypeDef:\n \"\"\"\n Returns an Amazon FSx for OpenZFS volume to the state saved by the specified\n snapshot.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.restore_volume_from_snapshot)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#restore_volume_from_snapshot)\n \"\"\"\n def tag_resource(self, *, ResourceARN: str, Tags: List[\"TagTypeDef\"]) -> Dict[str, Any]:\n \"\"\"\n Tags an Amazon FSx resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.tag_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#tag_resource)\n \"\"\"\n def untag_resource(self, *, ResourceARN: str, TagKeys: List[str]) -> Dict[str, Any]:\n \"\"\"\n This action removes a tag from an Amazon FSx resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.untag_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#untag_resource)\n \"\"\"\n def update_data_repository_association(\n self,\n *,\n AssociationId: str,\n ClientRequestToken: str = None,\n ImportedFileChunkSize: int = None,\n S3: \"S3DataRepositoryConfigurationTypeDef\" = None\n ) -> UpdateDataRepositoryAssociationResponseTypeDef:\n \"\"\"\n Updates the configuration of an existing data repository association on an\n Amazon FSx for Lustre file system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.update_data_repository_association)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#update_data_repository_association)\n \"\"\"\n def update_file_cache(\n self,\n *,\n FileCacheId: str,\n ClientRequestToken: str = None,\n LustreConfiguration: \"UpdateFileCacheLustreConfigurationTypeDef\" = None\n ) -> UpdateFileCacheResponseTypeDef:\n \"\"\"\n Updates the configuration of an existing Amazon File Cache resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.update_file_cache)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#update_file_cache)\n \"\"\"\n def update_file_system(\n self,\n *,\n FileSystemId: str,\n ClientRequestToken: str = None,\n StorageCapacity: int = None,\n WindowsConfiguration: \"UpdateFileSystemWindowsConfigurationTypeDef\" = None,\n LustreConfiguration: \"UpdateFileSystemLustreConfigurationTypeDef\" = None,\n OntapConfiguration: \"UpdateFileSystemOntapConfigurationTypeDef\" = None,\n OpenZFSConfiguration: \"UpdateFileSystemOpenZFSConfigurationTypeDef\" = None\n ) -> UpdateFileSystemResponseTypeDef:\n \"\"\"\n Use this operation to update the configuration of an existing Amazon FSx file\n system.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.update_file_system)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#update_file_system)\n 
\"\"\"\n def update_snapshot(\n self, *, Name: str, SnapshotId: str, ClientRequestToken: str = None\n ) -> UpdateSnapshotResponseTypeDef:\n \"\"\"\n Updates the name of an Amazon FSx for OpenZFS snapshot.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.update_snapshot)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#update_snapshot)\n \"\"\"\n def update_storage_virtual_machine(\n self,\n *,\n StorageVirtualMachineId: str,\n ActiveDirectoryConfiguration: \"UpdateSvmActiveDirectoryConfigurationTypeDef\" = None,\n ClientRequestToken: str = None,\n SvmAdminPassword: str = None\n ) -> UpdateStorageVirtualMachineResponseTypeDef:\n \"\"\"\n Updates an Amazon FSx for ONTAP storage virtual machine (SVM).\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.update_storage_virtual_machine)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#update_storage_virtual_machine)\n \"\"\"\n def update_volume(\n self,\n *,\n VolumeId: str,\n ClientRequestToken: str = None,\n OntapConfiguration: \"UpdateOntapVolumeConfigurationTypeDef\" = None,\n Name: str = None,\n OpenZFSConfiguration: \"UpdateOpenZFSVolumeConfigurationTypeDef\" = None\n ) -> UpdateVolumeResponseTypeDef:\n \"\"\"\n Updates the configuration of an Amazon FSx for NetApp ONTAP or Amazon FSx for\n OpenZFS volume.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Client.update_volume)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/client.html#update_volume)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"describe_backups\"]\n ) -> DescribeBackupsPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Paginator.DescribeBackups)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/paginators.html#describebackupspaginator)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"describe_file_systems\"]\n ) -> DescribeFileSystemsPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Paginator.DescribeFileSystems)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/paginators.html#describefilesystemspaginator)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"describe_storage_virtual_machines\"]\n ) -> DescribeStorageVirtualMachinesPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Paginator.DescribeStorageVirtualMachines)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/paginators.html#describestoragevirtualmachinespaginator)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"describe_volumes\"]\n ) -> DescribeVolumesPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Paginator.DescribeVolumes)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/paginators.html#describevolumespaginator)\n \"\"\"\n 
@overload\n def get_paginator(\n self, operation_name: Literal[\"list_tags_for_resource\"]\n ) -> ListTagsForResourcePaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/fsx.html#FSx.Paginator.ListTagsForResource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_fsx/paginators.html#listtagsforresourcepaginator)\n \"\"\"\n","sub_path":"typings/mypy_boto3/fsx/client.pyi","file_name":"client.pyi","file_ext":"pyi","file_size_in_byte":38779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
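The stub record above has no runtime behaviour of its own; it only lets a type checker resolve the boto3.client("fsx") call and its paginators. A minimal usage sketch (the region name is a placeholder; credentials are taken from the environment):

```python
import boto3

from mypy_boto3_fsx import FSxClient

client: FSxClient = boto3.client("fsx", region_name="us-east-1")

# the overloads above give this paginator the DescribeFileSystemsPaginator type,
# so the page items below are fully typed for mypy/pyright
paginator = client.get_paginator("describe_file_systems")
for page in paginator.paginate():
    for fs in page["FileSystems"]:
        print(fs["FileSystemId"], fs["Lifecycle"])
```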
+{"seq_id":"320993629","text":"from tkinter import *\r\nimport pandas as pd\r\nimport random\r\n\r\nBACKGROUND_COLOR = \"#B1DDC6\"\r\ncurrent_card = {}\r\nto_learn = {}\r\n\r\n# --------------------------------- WORDS DATA --------------------------------- #\r\ntry:\r\n data = pd.read_csv(\"data/words_that_learned.csv\")\r\nexcept FileNotFoundError:\r\n original_data = pd.read_csv(\"data/turkish_words.csv\")\r\n to_learn = original_data.to_dict(orient=\"records\")\r\nelse:\r\n to_learn = data.to_dict(orient=\"records\")\r\n\r\n\r\ndef next_card():\r\n global current_card, flip_timer\r\n window.after_cancel(flip_timer)\r\n current_card = random.choice(to_learn)\r\n canvas.itemconfig(card_title, text=\"English\", fill=\"black\")\r\n canvas.itemconfig(card_word, text=current_card[\"English\"], fill=\"black\")\r\n canvas.itemconfig(card_background, image=card_front_img)\r\n flip_timer = window.after(5000, func=flip_card)\r\n\r\n\r\n# --------------------------------- FLIP CARDS --------------------------------- #\r\ndef flip_card():\r\n canvas.itemconfig(card_title, text=\"Turkish\", fill=\"white\")\r\n canvas.itemconfig(card_word, text=current_card[\"Turkish\"], fill=\"white\")\r\n canvas.itemconfig(card_background, image=card_back_img)\r\n\r\n\r\n# --------------------------------- SAVE KNOWN WORDS --------------------------------- #\r\ndef is_known():\r\n to_learn.remove(current_card)\r\n new_data = pd.DataFrame(to_learn)\r\n new_data.to_csv(\"data/words_that_learned.csv\", index=False)\r\n next_card()\r\n\r\n\r\n# --------------------------------- UI SETUP --------------------------------- #\r\nwindow = Tk()\r\nwindow.title(\"Flash Cards\")\r\nwindow.config(padx=50, pady=50, bg=BACKGROUND_COLOR)\r\n\r\nflip_timer = window.after(5000, func=flip_card)\r\n\r\n# Canvas\r\ncanvas = Canvas(width=800, height=526)\r\ncard_front_img = PhotoImage(file=\"./images/card_front.png\")\r\ncard_back_img = PhotoImage(file=\"./images/card_back.png\")\r\ncard_background = canvas.create_image(400, 263, image=card_front_img)\r\ncard_title = canvas.create_text(400, 150, text=\"\", font=(\"Arial\", 30, \"italic\"))\r\ncard_word = canvas.create_text(400, 263, text=\"\", font=(\"Arial\", 60, \"bold\"))\r\ncanvas.config(bg=BACKGROUND_COLOR, highlightthickness=0)\r\ncanvas.grid(row=0, column=0, columnspan=2)\r\n\r\n# Buttons\r\nwrong_image = PhotoImage(file=\"./images/wrong.png\")\r\nwrong_button = Button(image=wrong_image, highlightthickness=0, bd=0, command=next_card)\r\nright_image = PhotoImage(file=\"./images/right.png\")\r\nright_button = Button(image=right_image, highlightthickness=0, bd=0, command=is_known)\r\n\r\nwrong_button.grid(row=1, column=0)\r\nright_button.grid(row=1, column=1)\r\n\r\nnext_card()\r\n\r\nwindow.mainloop()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"142915195","text":"#!/bin/env python\n# coding=utf8\n\nimport os\nimport logging\nlogger1 = logging.getLogger(__name__)\n\n\ndef STAR_virus(args):\n\n logger1.info('align virus genome...')\n\n sample = args.sample\n outdir = args.outdir\n input_read = args.input_read\n virus_genomeDir = args.virus_genomeDir\n thread = args.thread\n outFilterMatchNmin = args.outFilterMatchNmin\n\n # check dir\n if not os.path.exists(outdir):\n os.system('mkdir -p %s' % (outdir))\n\n out_prefix = outdir + \"/\" + sample + \"_virus_\"\n out_BAM = out_prefix + \"Aligned.sortedByCoord.out.bam\"\n\n # host genome align\n cmd = \"STAR \\\n --genomeDir {genome} \\\n --readFilesIn {input_read}\\\n --outFilterMatchNmin 35\\\n --outSAMtype BAM SortedByCoordinate\\\n --runThreadN {runThreadN}\\\n --limitBAMsortRAM 1933647594\\\n --outFileNamePrefix {out_prefix}\".format(genome=virus_genomeDir,\n input_read=input_read, runThreadN=thread, out_prefix=out_prefix)\n\n # add gz\n if input_read[len(input_read) - 2:] == \"gz\":\n cmd += ' --readFilesCommand zcat'\n\n logger1.info(cmd)\n os.system(cmd)\n logger1.info(\"align virus genome done.\")\n\n cmd = \"samtools index {out_BAM}\".format(out_BAM=out_BAM)\n logger1.info(cmd)\n os.system(cmd)\n logger1.info(\"index done.\")\n\n\ndef get_opts_STAR_virus(parser, sub_program):\n if sub_program:\n parser.add_argument('--outdir', help='output dir', required=True)\n parser.add_argument('--sample', help='sample name', required=True)\n parser.add_argument(\"--input_read\", required=True)\n parser.add_argument(\"--thread\", help='STAR thread', default=1)\n parser.add_argument('--assay', help='assay', required=True)\n parser.add_argument(\n '--virus_genomeDir',\n help='virus genome dir',\n required=True)\n parser.add_argument(\"--outFilterMatchNmin\", help='STAR outFilterMatchNmin', default=35)\n","sub_path":"celescope/rna_virus/STAR_virus.py","file_name":"STAR_virus.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"557142491","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport atexit\nimport os\nimport pprint\nimport socket\nimport select\nfrom threading import Thread, Lock\nimport requests\nimport signal\nimport json\nimport time\nimport numpy\nfrom io import BytesIO\nimport six\nimport base64\nimport PIL.Image\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nfrom aetros.MonitorThread import MonitoringThread\n\n\ndef on_shutdown():\n for job in on_shutdown.started_jobs:\n if job.running:\n job.done()\n\n\non_shutdown.started_jobs = []\n\natexit.register(on_shutdown)\n\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\ndef invalid_json_values(obj):\n if isinstance(obj, numpy.generic):\n return obj.item()\n if isinstance(obj, numpy.ndarray):\n return obj.tolist()\n if isinstance(obj, bytes):\n return obj.decode('cp437')\n raise TypeError('Invalid data type passed to json encoder: ' + type(obj).__name__)\n\n\ndef parse_message(buffer):\n parsed = []\n while -1 != buffer.find('\\n'):\n term_position = buffer.find('\\n')\n messages = buffer[0:term_position]\n messages = messages.split('\\t')\n for message in messages:\n if message:\n parsed.append(json.loads(message))\n\n buffer = buffer[term_position + 1:]\n\n return buffer, parsed\n\n\nclass EventListener:\n def __init__(self):\n self.events = {}\n\n def on(self, name, callback):\n if name not in self.events:\n self.events[name] = []\n\n self.events[name].append(callback)\n\n def fire(self, name, parameter=None):\n if name in self.events:\n for callback in self.events[name]:\n callback(parameter)\n\n\nclass Client:\n def __init__(self, api_host, api_token, event_listener, api_port):\n \"\"\"\n\n :type api_host: string\n :type api_token: string\n :type event_listener: EventListener\n :type job_id: integer\n \"\"\"\n self.api_token = api_token\n self.api_host = api_host\n self.api_port = api_port\n self.event_listener = event_listener\n self.message_id = 0\n self.job_id = None\n self.thread_instance = None\n self.s = None\n\n self.lock = Lock()\n self.connection_errors = 0\n self.queue = []\n\n self.active = False\n self.registered = False\n self.connected = False\n self.read_buffer = ''\n\n def start(self, job_id):\n self.job_id = job_id\n\n self.active = True\n\n if not self.thread_instance:\n self.thread_instance = Thread(target=self.thread)\n self.thread_instance.daemon = True\n self.thread_instance.start()\n\n def connect(self):\n locked = False\n\n try:\n locked = True\n self.lock.acquire()\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s.connect((self.api_host, self.api_port))\n self.connected = True\n self.lock.release()\n locked = False\n\n self.send_message({'register_job_worker': self.api_token, 'job_id': self.job_id})\n messages = self.read_full_message(self.s)\n\n if \"JOB_REGISTERED\" in messages:\n self.registered = True\n print(\"Connected to %s \" % (self.api_host,))\n self.handle_messages(messages)\n return True\n\n print(\"Registration of job %s failed.\" % (self.job_id,))\n return False\n except socket.error as error:\n if locked:\n self.lock.release()\n print(\"Connection error during connecting to %s: %d: %s.\" % (self.api_host, error.errno, error.message))\n time.sleep(1)\n return False\n\n def debug(self):\n sent = len(filter(lambda x: x['sent'], self.queue))\n sending = len(filter(lambda x: x['sending'], self.queue))\n open = 
len(filter(lambda x: not x['sending'], self.queue))\n print(\"%d sent, %d in sending, %d open \" % (sent, sending, open))\n\n def connection_error(self, error=None):\n if not self.connected:\n return\n\n if socket is None:\n # python interpreter is already dying, so quit\n return\n\n if hasattr(error, 'errno') and hasattr(error, 'message'):\n print(\"Connection error: %d: %s\" % (error.errno, error.message,))\n else:\n print(\"Connection error\")\n\n self.connected = False\n self.registered = False\n\n # set all messages that are in sending to sending=false\n for message in self.queue:\n if message['sending'] and not message['sent']:\n message['sending'] = False\n\n self.connection_errors += 1\n\n def thread(self):\n last_ping = 0\n\n while True:\n if self.connected and self.registered:\n try:\n if last_ping < time.time() - 1:\n # ping every second\n last_ping = time.time()\n self.send_message(\"PING\")\n\n if self.connected:\n # send pending messages\n max_messages = 1\n for message in self.queue:\n if not message['sending'] and not message['sent']:\n max_messages += 1\n self.send_message(message)\n if max_messages > 10:\n # not too much at once, so we have time to listen for incoming messages\n break\n\n # see if we can read something\n self.lock.acquire()\n readable, writable, exceptional = select.select([self.s], [self.s], [])\n self.lock.release()\n if exceptional:\n self.connection_error()\n\n for s in readable:\n messages = self.read(s)\n if messages:\n self.handle_messages(messages)\n\n except Exception as error:\n self.connection_error(error)\n\n elif not self.connected and self.active:\n if not self.connect():\n time.sleep(5)\n\n time.sleep(0.1)\n\n def end(self):\n #send all missing messages\n while True:\n if len(self.queue) == 0:\n break\n\n time.sleep(0.1)\n\n\n def close(self):\n self.active = False\n self.connected = False\n\n self.lock.acquire()\n try:\n self.s.shutdown(socket.SHUT_RDWR)\n self.s.close()\n except:\n pass\n self.lock.release()\n\n def send(self, message):\n self.message_id += 1\n message['id'] = self.message_id\n message['sending'] = False\n message['sent'] = False\n self.queue.append(message)\n\n def send_message(self, message):\n if isinstance(message, dict):\n message['sending'] = True\n\n msg = json.dumps(message, default=invalid_json_values)\n\n try:\n self.lock.acquire()\n self.s.sendall(msg + \"\\n\")\n self.lock.release()\n\n return True\n except:\n self.lock.release()\n self.connection_error()\n return False\n\n def handle_messages(self, messages):\n for message in messages:\n if 'handled' in message:\n for qm in self.queue:\n if qm['id'] == message['id']:\n\n if message['handled']:\n qm['sent'] = True\n self.queue.remove(qm)\n else:\n qm['sending'] = False\n\n break\n\n if 'stop' in message:\n self.event_listener.fire('stop')\n\n def read_full_message(self, s):\n \"\"\"\n Reads until we receive a message termination (\\n)\n \"\"\"\n message = ''\n\n while True:\n chunk = ''\n try:\n self.lock.acquire()\n chunk = s.recv(2048)\n finally:\n self.lock.release()\n\n if chunk == '':\n self.connection_error()\n return False\n\n message += chunk\n\n message, parsed = parse_message(message)\n if parsed:\n return parsed\n\n def read(self, s):\n \"\"\"\n Reads per call current buffer from network stack. If a full message has been collected (\\n retrieved)\n the message will be parsed and returned. 
If no message has yet been completely transmitted it returns []\n\n        :return: list\n        \"\"\"\n\n        chunk = ''\n\n        try:\n            self.lock.acquire()\n            chunk = s.recv(2048)\n        finally:\n            self.lock.release()\n\n        if chunk == '':\n            self.connection_error()\n            return False\n\n        self.read_buffer += chunk\n\n        self.read_buffer, parsed = parse_message(self.read_buffer)\n        return parsed\n\n\ndef start_job(name, api_token=None):\n    \"\"\"\n    Tries to load the job defined in the AETROS_JOB_ID environment variable.\n    If not defined, it creates a new job.\n    Starts the job as well.\n\n    :param name: string\n    :param api_token: string\n    :return: JobBackend\n    \"\"\"\n\n    job_id = os.getenv('AETROS_JOB_ID')\n    if job_id:\n        job = JobBackend(api_token=api_token)\n        job.load(job_id)\n    else:\n        job = create_job(name, api_token)\n\n    job.start()\n\n    return job\n\n\ndef create_job(name, api_token=None):\n    \"\"\"\n    Creates a new job.\n\n    :param name: string\n    :param api_token: string\n    :return: JobBackend\n    \"\"\"\n    job = JobBackend(api_token=api_token)\n    job.create(name)\n    job.load()\n\n    return job\n\n\nclass JobLossChannel:\n    \"\"\"\n    :type job_backend : JobBackend\n    \"\"\"\n    def __init__(self, job_backend, name, xaxis=None, yaxis=None, layout=None):\n        self.name = name\n        self.job_backend = job_backend\n        message = {\n            'name': self.name,\n            'traces': ['training', 'validation'],\n            'type': JobChannel.NUMBER,\n            'main': True,\n            'xaxis': xaxis,\n            'yaxis': yaxis,\n            'layout': layout,\n            'lossChannel': True\n        }\n        self.job_backend.job_add_status('channel', message)\n\n    def send(self, x, training_loss, validation_loss):\n\n        message = {\n            'name': self.name,\n            'x': x,\n            'y': [training_loss, validation_loss],\n        }\n        self.job_backend.job_add_status('channel-value', message)\n\nclass JobImage:\n    def __init__(self, id, image, title=None):\n        self.id = id\n        if not isinstance(image, PIL.Image.Image):\n            raise Exception('JobImage requires a PIL.Image as image argument.')\n\n        self.image = image\n        self.title = title or id\n\n\nclass JobChannel:\n    NUMBER = 'number'\n    TEXT = 'text'\n    IMAGE = 'image'\n\n    \"\"\"\n    :type job_backend: JobBackend\n    \"\"\"\n    def __init__(self, job_backend, name, traces=None, main_graph=False,\n                 type=None, xaxis=None, yaxis=None, layout=None):\n        self.name = name\n        self.job_backend = job_backend\n\n        if not (isinstance(traces, list) or traces is None):\n            raise Exception('traces can only be None or a list of dicts: [{name: \"name\", option1: ...}, ...]')\n\n        if not traces:\n            traces = [{'name': name}]\n\n        if isinstance(traces, list) and isinstance(traces[0], six.string_types):\n            traces = list(map(lambda x: {'name': x}, traces))\n\n        message = {\n            'name': name,\n            'traces': traces,\n            'type': type or JobChannel.NUMBER,\n            'main': main_graph,\n            'xaxis': xaxis,\n            'yaxis': yaxis,\n            'layout': layout,\n        }\n        self.traces = traces\n        self.job_backend.job_add_status('channel', message)\n\n    def send(self, x, y):\n        if not isinstance(y, list):\n            y = [y]\n\n        if len(y) != len(self.traces):\n            raise Exception('You tried to set more y values (%d items) than traces available in channel %s (%d traces).' 
% (\n len(y), self.name, len(self.traces)))\n\n message = {\n 'name': self.name,\n 'x': x,\n 'y': y\n }\n self.job_backend.job_add_status('channel-value', message)\n\n\nclass JobBackend:\n \"\"\"\n :type event_listener: EventListener\n :type api_token: string\n :type job_id: int\n :type client: Client\n :type job: dict\n \"\"\"\n\n def __init__(self, job_id=None, api_token=None):\n self.event_listener = EventListener()\n self.api_token = api_token if api_token else os.getenv('API_KEY')\n\n self.job_id = job_id\n self.client = None\n self.job = None\n self.running = False\n self.monitoring_thread = None\n\n self.host = os.getenv('API_HOST')\n self.port = int(os.getenv('API_PORT') or 8051)\n if not self.host or self.host == 'false':\n self.host = 'aetros.com'\n\n self.last_progress_call = None\n self.job_ids = []\n self.in_request = False\n self.stop_requested = False\n self.event_listener.on('stop', self.external_stop)\n self.client = Client(self.host, self.api_token, self.event_listener, self.port)\n\n def external_stop(self, params):\n print(\"Job stopped through AETROS Trainer.\")\n self.abort()\n self.stop_requested = True\n os.kill(os.getpid(), signal.SIGINT)\n\n def progress(self, epoch, total):\n if self.last_progress_call:\n # how long took it since the last call?\n time_per_epoch = time.time() - self.last_progress_call\n eta = time_per_epoch * (total-epoch)\n self.set_info('eta', eta)\n if time_per_epoch > 0:\n self.set_info('epochsPerSecond', 1 / time_per_epoch)\n\n self.set_info('epoch', epoch)\n self.set_info('epochs', total)\n self.last_progress_call = time.time()\n\n def create_loss_channel(self, name, xaxis=None, yaxis=None, layout=None):\n \"\"\"\n :param name: string\n :return: JobLossGraph\n \"\"\"\n\n return JobLossChannel(self, name, xaxis, yaxis, layout)\n\n def create_channel(self, name, traces=None, main_graph=False,\n type=JobChannel.NUMBER,\n xaxis=None, yaxis=None, layout=None):\n \"\"\"\n :param name: string\n :param traces: list\n :param main_graph: bool\n :param type: string JobChannel.NUMBER, JobChannel.TEXT, JobChannel.IMAGE\n :return: JobChannel\n \"\"\"\n return JobChannel(self, name, traces, main_graph, type, xaxis, yaxis, layout)\n\n def start(self):\n if not self.job_id:\n raise Exception('No job id found. Use create() first.')\n\n self.running = True\n on_shutdown.started_jobs.append(self)\n self.collect_system_information()\n self.start_monitoring()\n\n self.client.start(self.job_id)\n\n print(\"Job %s#%d (%s) started. 
Open http://%s/trainer/app#/training=%s to monitor the training.\" %\n (self.model_id, self.job_index, self.job_id, self.host, self.job_id))\n\n def start_monitoring(self):\n self.monitoring_thread = MonitoringThread(self)\n self.monitoring_thread.daemon = True\n self.monitoring_thread.start()\n\n def done(self):\n if not self.running:\n return\n\n self.post('job/stopped', json={'id': self.job_id})\n\n self.client.end()\n self.running = False\n\n if self.monitoring_thread:\n self.monitoring_thread.stop()\n\n def abort(self):\n if not self.running:\n return\n\n self.post('job/aborted', json={'id': self.job_id})\n\n self.client.close()\n self.running = False\n if self.monitoring_thread:\n self.monitoring_thread.stop()\n\n def crash(self, e=None):\n self.post('job/crashed', json={'id': self.job_id, 'error': str(e) if e else None})\n\n self.client.close()\n self.running = False\n if self.monitoring_thread:\n self.monitoring_thread.stop()\n\n def get_url(self, affix):\n\n url = 'http://%s/api/%s' % (self.host, affix)\n\n if self.api_token:\n if '?' in url:\n url += '&token=' + self.api_token\n else:\n url += '?token=' + self.api_token\n\n return url\n\n def write_log(self, message):\n item = {'message': message}\n\n self.client.send({\n 'type': 'log',\n 'time': time.time(),\n 'data': item\n })\n\n def upload_weights(self, name, file_path, accuracy=None, with_status=False):\n if not os.path.isfile(file_path):\n return\n\n class CancelledError(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n Exception.__init__(self, msg)\n\n def __str__(self):\n return self.msg\n\n __repr__ = __str__\n\n class BufferReader(BytesIO):\n\n def __init__(self, buf=b'',\n callback=None,\n cb_args=(),\n cb_kwargs={}):\n self._callback = callback\n self._cb_args = cb_args\n self._cb_kwargs = cb_kwargs\n self._progress = 0\n self._len = len(buf)\n BytesIO.__init__(self, buf)\n\n def __len__(self):\n return self._len\n\n def read(self, n=-1):\n chunk = BytesIO.read(self, n)\n self._progress += int(len(chunk))\n self._cb_kwargs.update({\n 'size': self._len,\n 'progress': self._progress\n })\n if self._callback:\n try:\n self._callback(*self._cb_args, **self._cb_kwargs)\n except:\n raise CancelledError('The upload was cancelled.')\n return chunk\n\n state = {'progress': -1}\n\n def progress(size=None, progress=None):\n current_progress = int(progress / size * 100)\n if state['progress'] != current_progress:\n state['progress'] = current_progress\n\n if with_status:\n self.set_status('UPLOAD WEIGHTS ' + str(current_progress) + '%')\n else:\n print((\"{0}%\".format(current_progress)))\n\n files = {\"upfile\": (name, open(file_path, 'rb').read())}\n\n (data, ctype) = requests.packages.urllib3.filepost.encode_multipart_formdata(files)\n\n headers = {\n \"Content-Type\": ctype\n }\n\n body = BufferReader(data, progress)\n\n url = self.get_url('job/weights?id=%s&accuracy=%.2f' %\n (self.job_id, accuracy if accuracy is not None else -1))\n response = requests.post(url, data=body, headers=headers)\n\n if response.status_code != 200:\n raise Exception('Uploading of weights failed: %d: %s' %\n (response.status_code, response.content))\n\n def set_status(self, status):\n print('Job status changed to %s' % (status,))\n self.job_add_status('status', status)\n\n def get_best_weight_url(self, job_id):\n response = self.get('job/weight-best', {'id': job_id})\n return response.json()\n\n def get(self, url, params=None, **kwargs):\n json_chunk = kwargs.get('json')\n if 
(json_chunk and not isinstance(json_chunk, str)):\n kwargs['json'] = json.loads(json.dumps(json_chunk, default=invalid_json_values))\n\n return requests.get(self.get_url(url), params=params, **kwargs)\n\n def post(self, url, data=None, **kwargs):\n json_chunk = kwargs.get('json')\n if (json_chunk and not isinstance(json_chunk, str)):\n kwargs['json'] = json.loads(json.dumps(json_chunk, default=invalid_json_values))\n\n return requests.post(self.get_url(url), data=data, **kwargs)\n\n def put(self, url, data=None, **kwargs):\n json_chunk = kwargs.get('json')\n if (json_chunk and not isinstance(json_chunk, str)):\n kwargs['json'] = json.loads(json.dumps(json_chunk, default=invalid_json_values))\n\n return requests.put(self.get_url(url), data=data, **kwargs)\n\n def create(self, name, server_id='local', hyperparameter=None, dataset_id=None, insights=False):\n response = self.put('job', json={\n 'modelId': name,\n 'serverId': server_id,\n 'hyperParameters': hyperparameter,\n 'insights': insights,\n 'datasetId': dataset_id\n })\n\n if response.status_code != 200:\n raise Exception(\"Could not create job: %s\" % (response.content,))\n\n self.job_id = response.json()\n\n return self.job_id\n\n def ensure_model(self, name, model_json, settings=None, type='custom', layers=None, graph=None):\n response = self.put('model/ensure', {\n 'id': name,\n 'type': type,\n 'model': model_json,\n 'settings': json.dumps(settings, allow_nan=False, default=invalid_json_values) if settings else None,\n 'layers': json.dumps(layers, allow_nan=False, default=invalid_json_values),\n 'graph': json.dumps(graph, allow_nan=False, default=invalid_json_values),\n })\n\n if response.status_code != 200:\n raise Exception(\"Could not create model: %s\" % (response.content,))\n\n return True\n\n @property\n def model_id(self):\n if not self.job:\n raise Exception('Job not loaded yet. Use load(id) first.')\n\n return self.job['modelId']\n\n @property\n def job_index(self):\n if not self.job:\n raise Exception('Job not loaded yet. Use load(id) first.')\n\n return self.job['index']\n\n def is_keras_model(self):\n if not self.job:\n raise Exception('Job not loaded yet. Use load(id) first.')\n\n return not self.job['config']['fromCode']\n\n def get_parameter(self, name):\n if not self.job:\n raise Exception('Job not loaded yet. Use load(id) first.')\n\n if 'hyperParameters' not in self.job['config'] or not self.job['config']['hyperParameters']:\n raise Exception('This job does not have any hyper-parameters')\n\n if name not in self.job['config']['hyperParameters']:\n raise Exception('This job does not have the hyper-parameter %s' % (name,))\n\n return self.job['config']['hyperParameters'][name]\n\n def load(self, id=None):\n \"\"\"\n Loads job and sets as current.\n :param id: int\n \"\"\"\n if id:\n self.job_id = id\n\n if not self.job_id:\n raise Exception('No job id given.')\n\n response = self.get('job', {'id': self.job_id})\n\n if response.status_code != 200:\n raise Exception(\"Could not find job: %s\" % (response.content,))\n\n job = response.json()\n\n if job is None:\n raise Exception('Job not found. Have you configured your token correctly?')\n\n if 'error' in job:\n raise Exception('Job not found. Have you configured your token correctly? %s: %s' %\n (job['error'], job['message']))\n\n self.job = job\n\n def get_job_model(self):\n \"\"\"\n Returns a new JobModel instance with current loaded job data attached.\n :return: JobModel\n \"\"\"\n if not self.job:\n raise Exception('Job not loaded yet. 
Use load(id) first.')\n\n from aetros.JobModel import JobModel\n\n return JobModel(self.job)\n\n def sync_weights(self):\n self.job_add_status('status', 'SYNC WEIGHTS')\n print(\"Sync weights ...\")\n self.upload_weights('best.hdf5', self.get_job_model().get_weights_filepath_best(), with_status=True)\n print(\"Weights synced.\")\n\n def load_light_job(self, id=None):\n \"\"\"\n Loads job with less information and sets as current.\n :param id: int\n \"\"\"\n if id:\n self.job_id = id\n\n if not self.job_id:\n raise Exception('No job id given.')\n\n response = self.get('job', {'id': self.job_id, 'light': 1})\n if response.status_code != 200:\n raise Exception(\"Could not find version (%s): %s\" % (self.job_id, response.content,))\n\n job = response.json()\n\n if job is None or job == 'Job not found':\n raise Exception('Version not found. Have you configured your token correctly?')\n\n if 'error' in job:\n raise Exception('Version not found. Have you configured your token correctly? %s: %s' % (\n job['error'], job['message']))\n\n if not isinstance(job, dict):\n raise Exception(\n 'Version does not exist. Make sure you created the job via AETROS TRAINER')\n\n if not len(job['config']):\n raise Exception(\n 'Version does not have a configuration. Make sure you created the job via AETROS TRAINER')\n\n self.job = job\n\n def job_add_status(self, key, value):\n item = {'statusKey': key, 'statusValue': value}\n\n self.client.send({\n 'type': 'job-status',\n 'time': time.time(),\n 'data': item\n })\n\n def set_info(self, name, value):\n self.job_set_info_key(name, value)\n\n def job_set_info_key(self, key, value):\n self.client.send({\n 'type': 'job-info',\n 'data': {'key': key, 'value': value}\n })\n\n def job_add_insight(self, x, images, confusion_matrix):\n info = {'epoch': x, 'confusionMatrix': confusion_matrix}\n\n converted_images = []\n for image in images:\n if not isinstance(image, JobImage):\n raise Exception('job_add_insight only accepts JobImage instances in images argument')\n\n converted_images.append({\n 'id': image.id,\n 'title': image.title,\n 'image': self.to_base64(image.image)\n })\n\n self.client.send({\n 'type': 'job-insight',\n 'time': time.time(),\n 'data': {'info': info, 'images': converted_images}\n })\n\n def to_base64(self, image):\n buffer = BytesIO()\n if (six.PY2):\n buffer = StringIO()\n image.save(buffer, format=\"JPEG\", optimize=True, quality=80)\n return base64.b64encode(buffer.getvalue())\n\n\n def collect_system_information(self):\n import psutil\n\n mem = psutil.virtual_memory()\n self.job_set_info_key('memory_total', mem.total)\n\n on_gpu = False\n\n import sys\n if 'theano.sandbox' in sys.modules:\n # at this point, theano is already initialised, so we can use it to monitor the GPU.\n from theano.sandbox import cuda\n self.job_set_info_key('cuda_available', cuda.cuda_available)\n if cuda.cuda_available:\n on_gpu = cuda.use.device_number is not None\n self.job_set_info_key('cuda_device_number', cuda.active_device_number())\n self.job_set_info_key('cuda_device_name', cuda.active_device_name())\n if cuda.cuda_ndarray.cuda_ndarray.mem_info:\n gpu = cuda.cuda_ndarray.cuda_ndarray.mem_info()\n self.job_set_info_key('cuda_device_max_memory', gpu[1])\n free = gpu[0] / 1024 / 1024 / 1024\n total = gpu[1] / 1024 / 1024 / 1024\n used = total - free\n\n print(\"%.2fGB GPU memory used of %.2fGB, %s, device id %d\" % (used, total, cuda.active_device_name(), cuda.active_device_number()))\n\n self.job_set_info_key('on_gpu', on_gpu)\n\n import cpuinfo\n cpu = 
cpuinfo.get_cpu_info()\n self.job_set_info_key('cpu_name', cpu['brand'])\n self.job_set_info_key('cpu', [cpu['hz_actual_raw'][0], cpu['count']])\n","sub_path":"aetros/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":28441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
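The aetros/backend.py record above exposes a small tracking API: start_job()/create_job() construct a JobBackend, channels are declared once and then fed (x, y) pairs, and progress() derives eta and epochsPerSecond. A minimal usage sketch, assuming API_KEY/API_HOST are configured in the environment; the model name and train_one_epoch() are placeholders, not part of the original file:

import random
from aetros.backend import start_job

def train_one_epoch():
    # hypothetical stand-in returning (training_loss, validation_loss)
    return random.random(), random.random()

job = start_job('demo/my-model')         # resumes AETROS_JOB_ID or creates a new job
loss = job.create_loss_channel('loss')   # fixed 'training'/'validation' traces
acc = job.create_channel('accuracy')     # defaults to one trace named like the channel

epochs = 10
for epoch in range(1, epochs + 1):
    training_loss, validation_loss = train_one_epoch()
    loss.send(epoch, training_loss, validation_loss)
    acc.send(epoch, random.random())
    job.progress(epoch, epochs)          # updates the eta and epochsPerSecond infos

job.done()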
+{"seq_id":"546110328","text":"totalpaid=0\r\ncount=1\r\npreviousbalance=balance\r\nwhile count<=12:\r\n monthlypayment=round(previousbalance*monthlyPaymentRate,2)\r\n interest=(previousbalance-monthlypayment)*(annualInterestRate/12)\r\n previousbalance=round(previousbalance+interest-monthlypayment,2)\r\n print(\"Month:\"+str(count))\r\n print(\"Minimum monthly payment:\"+str(monthlypayment))\r\n print(\"Remaining balance:\"+str(previousbalance))\r\n totalpaid=totalpaid+monthlypayment\r\n count=count+1\r\nprint(\"Total paid:\"+str(totalpaid))\r\nprint(\"Remaining balance:\"+str(previousbalance))\r\n","sub_path":"ProblemSet1/pblmset2_1.py","file_name":"pblmset2_1.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"282629721","text":"import os\nimport math\nimport plynth\nimport plynth.js as js\n\n\ndocument, window, console = js.document, js.window, js.console\nthree = js.THREE\n\n#if not 'esmod' in globals():\nesmod = js.esmod\n\nesmodImportPromise = js.esImport( {\n \"jsm/controls/OrbitControls.js\": [\"OrbitControls\"],\n \"jsm/geometries/ConvexGeometry.js\": [\"ConvexBufferGeometry\"]\n});\n\n\nasync def main():\n await esmodImportPromise\n CalcApp()\n\n\nclass CalcApp:\n def __init__(self):\n self.setup()\n self.animate()\n\n def setup(self):\n # https://github.com/mrdoob/three.js/blob/master/examples/webgl_geometry_convex.html\n\n self.scene = three.Scene();\n\n self.renderer = three.WebGLRenderer( { \"antialias\": True } );\n self.renderer.setPixelRatio( window.devicePixelRatio );\n self.renderer.setSize( window.innerWidth, window.innerHeight );\n document.body.appendChild( self.renderer.domElement );\n\n # camera\n self.camera = three.PerspectiveCamera( 40, window.innerWidth / window.innerHeight, 1, 1000 );\n self.camera.position.set( 15, 20, 30 );\n self.scene.add( self.camera );\n\n # controls\n controls = esmod.OrbitControls( self.camera, self.renderer.domElement );\n controls.minDistance = 20;\n controls.maxDistance = 50;\n controls.maxPolarAngle = math.pi / 2;\n\n self.scene.add( three.AmbientLight( 0x222222 ) );\n\n # light\n light = three.PointLight( 0xffffff, 0.6 );\n self.camera.add( light );\n\n #helper\n self.scene.add( three.AxesHelper( 20 ) );\n\n #textures\n loader = three.TextureLoader();\n texture = loader.load( 'textures/sprites/disc.png' );\n\n self.group = three.Group();\n self.scene.add( self.group );\n\n\n #points\n vertices = three.DodecahedronGeometry( 10 ).vertices;\n #for ( var i = 0; i < vertices.length; i ++ ) {\n # //vertices[ i ].add( randomPoint().multiplyScalar( 2 ) ); // wiggle the points\n #}\n\n pointsMaterial = three.PointsMaterial( {\n \"color\": 0x0080ff,\n \"map\": texture,\n \"size\": 1,\n \"alphaTest\": 0.5\n });\n\n pointsGeometry = three.BufferGeometry().setFromPoints(vertices);\n\n points = three.Points(pointsGeometry, pointsMaterial);\n self.group.add(points);\n\n #convex hull\n meshMaterial = three.MeshLambertMaterial({\n \"color\": 0xffffff,\n \"opacity\": 0.4,\n \"transparent\": True\n });\n\n\n meshGeometry = esmod.ConvexBufferGeometry( vertices );\n\n mesh = three.Mesh(meshGeometry, meshMaterial );\n mesh.material.side = three.BackSide; # // back faces\n mesh.renderOrder = 0;\n self.group.add(mesh);\n\n mesh = three.Mesh(meshGeometry, meshMaterial.clone());\n mesh.material.side = three.FrontSide; # // front faces\n mesh.renderOrder = 1;\n\n self.group.add(mesh);\n\n\n def animate(self):\n\n js.requestAnimationFrame( self.animate );\n\n self.group.rotation.y += 0.005;\n\n self.render();\n\n\n def render(self):\n self.renderer.render( self.scene, self.camera );\n\n","sub_path":"__utils/examples/threejs/plynth_calc.py","file_name":"plynth_calc.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"91342328","text":"from . import models\nfrom rest_framework import serializers\n\nclass CharacterSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Character\n fields = (\n 'name','image',\n 'date_old','age',\n 'race','rank',\n 'fruit_name','fruit_type',\n 'origin','attack',\n 'occupation','description',\n 'state',\n 'sex','reward',\n )\n # exclude = ['is_remove','created','modified']","sub_path":"core/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"425046623","text":"# =============================================================================\n# =============================================================================\n# ARIMA\n# =============================================================================\n\n# ARIMA stands for Auto-Regressie Integrated Modiving Averages\n# p is the number of autoregrssive terms\n# d is the number of non-seasonal differences needed for stationarity\n# q is the number o f lagged forecase errors in the prediction equation\n\n# =============================================================================\n# General Model\n# =============================================================================\n\nfrom statsmodels.tsa.arima_model import ARIMA\nmodel = ARIMA(series, order=(p,d,q))\nmodel_fit = model.fit(disp=0)\noutput = model_fit.forecast()\n\n# =============================================================================\n# Example \n# =============================================================================\n\nfrom statsmodels.tsa.arima_model import ARIMA\nimport pandas as pd\nimport os\nimport numpy as np\nfrom datetime import datetime\n\n# ====================Creating a Random Time Series ===========================\ncounts = np.arange(1,21) + 0.2 * (np.random.random(size=(20,)) - 0.5) #Create a range from 1 to 21. Creating random numbers\nstart = pd.datetime.strptime(\"1 Nov 16\", \"%d %b %y\")\ndaterange = pd.date_range(start, periods=20)\ntable = {\"count\": counts, \"date\": daterange} #Coding it as a dictionary\n\n# ====================Pre-Processing===========================\ndata = pd.DataFrame(table) # displaying it as a data frame\ndata.set_index(\"date\", inplace = True)\nprint(data)\n\n# ====================Setting up ARIMA===========================\nmodel = ARIMA(data[0:len(data)-1], (1,1,1)) #Mahdi's original example, doesn't work\nmodel = ARIMA(data[0:len(data)-1], (2,1,1))\nmodel_fit = model.fit(disp=0)\nprint(model_fit.forecast())\n\n# =============================================================================\n# Example Question\n# =============================================================================\nfrom statsmodels.tsa.arima_model import ARIMA\nimport pandas as pd\n\n#------ Pre-Procesing of imported Dataset with Pandas ---------#\ndf_all = pd.read_csv('./ReferenceFiles/GlobalLandTemperaturesByCountry.csv', header=0)\n\n# Dropping 'AvergeTemperatureUncertainty' column-This column is useless for our case !!\ndf_all_reduced = df_all.drop('AverageTemperatureUncertainty', axis=1)\n\n# Filtering 'France' as country\ndf_france = df_all_reduced [df_all_reduced.Country == 'France']\n\n# Dropping 'Country' column\ndf_france = df_france.drop('Country', axis=1)\n\n# Converting 'Date' column to a datetime format index to access data based on dates.\ndf_france.index = pd.to_datetime(df_france['Date'])\n\n# dropping 'Date' column. 
We use dates as index from now on, so we don't need them as an extra column (input)\ndf_france = df_france.drop('Date', axis=1)\n\n# Filtering data starting from 1960-01-01\ndf_france = df_france.loc['1960-01-01':]\n\n# Sorting index in an ascending way.\ndf_france = df_france.sort_index()\n\n# Replacing 'NaN' values with the last valid observation\ndf_france.AverageTemperature.fillna(method='pad', inplace=True)\n\n# Extract Out the Timeseries values part\ntimeseries = df_france.AverageTemperature\n\n#----------------------------- ARIMA ---------------------------------------\nsize = int(len(timeseries) - 9) #creating train and test datasets -> only keeping 9 values for testing\ntrain, test = timeseries[0:size], timeseries[size:len(timeseries)]\n\nprevious_samples = [x for x in train] #converting into list\n\nfor t in range(len(test)): \n model = ARIMA(previous_samples, order=(10, 0, 1)) #refit on all samples seen so far\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n yhat = output[0] #get the first element which is the forecast, we don't need the rest\n obs = test[t]\n previous_samples.append(obs) #have to add observation for fitting model\n print('predicted=%f, expected=%f' % ((yhat), (obs)))\n \n \n# =============================================================================\n# Additional Example \n# =============================================================================\nfrom statsmodels.tsa.arima_model import ARIMA\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('../ReferenceFiles/TimeSer.csv', header=0)\ndf['Date'] = pd.to_datetime(df['Date'])\nindexed_df = df.set_index('Date')\ntimeseries = indexed_df['Value']\n\nsplit_point = len(timeseries) - 10\ndataset, validation = timeseries[0:split_point], timeseries[split_point:]\nprint('Dataset %d, Validation %d' % (len(dataset), len(validation)))\n\nhistory = [x for x in dataset]\npredictions = list()\nprint('Printing Predicted vs Expected Values...')\nprint('\\n')\n\nfor t in range(len(validation)):\n model = ARIMA(history, order=(2,1,1))\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n yhat = output[0]\n predictions.append(float(yhat))\n obs = validation[t]\n history.append(obs)\n print('predicted=%f, expected=%f' % ((yhat), (obs))) \n\n# Plotting the output \nimport matplotlib.pylab as plt\n\nplt.plot(list(validation), color='black', label='Expected Values') #have to convert validation to a list\nplt.plot(predictions, color='blue', label='Predictions')\nplt.legend(loc='best')\nplt.rcParams[\"figure.figsize\"] = [5,10]\nplt.title('Plot of Expected and Predicted Values')\nplt.show() ","sub_path":"ARIMA.py","file_name":"ARIMA.py","file_ext":"py","file_size_in_byte":5385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
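All three ARIMA examples in the record above repeat the same walk-forward pattern: refit on every sample seen so far, forecast one step ahead, then append the true observation. Condensed into a helper (this assumes the legacy statsmodels.tsa.arima_model API used throughout the entry, which was removed in statsmodels 0.13):

from statsmodels.tsa.arima_model import ARIMA

def rolling_one_step_forecast(train, test, order=(2, 1, 1)):
    history = [x for x in train]
    predictions = []
    for obs in test:
        model_fit = ARIMA(history, order=order).fit(disp=0)
        predictions.append(float(model_fit.forecast()[0]))
        history.append(obs)  # walk forward with the observed value
    return predictions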
+{"seq_id":"309304212","text":"import os\n\nfrom datos import leerDAT\nfrom modulosOperativos import opcionesDelUsuario\nfrom modulosOperativos.opcionesDelUsuario import *\n\nnivel = 0\nnivelEnJuego = 0\npuntos = 0\ntableroEnJuego = {}\n\ndef perdedorSinMovimientos():\n \"\"\"la funcion avisa al usuario que se quedo sin movimientos, muestra puntaje y nivel adquirido\"\"\"\n global puntos, nivelEnJuego\n os.system(\"cls\")\n puntos = int(puntos) + int(leerDAT.leerDAT(\"puntaje\", \"ganador\")) #int(linecache.getline(\"configuracion.dat\", 23)[0:4]) + puntos\n print(\"Sin movimientos. Perdiste\")\n print(\"Nivel alcanzado:\", nivelEnJuego + 1)\n print(\"Puntaje final: \", puntos)\n print(\"\")\n input(\"Ingresar 's' para continuar \")\n opcionesDelUsuario.opcionDelUsuarioMenuInicial()\n\n\ndef ganadorTodoApagado():\n \"\"\"la funcion avisa al usuario que completo el nivel o los cinco del juego, muestra puntaje y nivel adquirido\"\"\"\n os.system(\"cls\")\n global puntos, nivelEnJuego, tableroEnJuego\n puntos = int(puntos) + int(leerDAT.leerDAT(\"puntaje\", \"ganador\"))\n print(\"Felicitaciones. Completaste el nivel\", nivelEnJuego + 1)\n print(\"Puntaje parcial: 500\")\n print(\"Puntaje total: \", puntos)\n input(\"Ingresar 's' para continuar \")\n if nivelEnJuego == 4:\n print(\"Todos los niveles completados\")\n input(\"Ingresar 's' para continuar \")\n opcionesDelUsuario.opcionDelUsuarioMenuInicial()\n nivelEnJuego = int(nivelEnJuego) + 1\n estructuraDelJuego.desarrolloDelJuego(nivelEnJuego)\n\n\ndef reiniciarNivel(puntosDeNivel):\n \"\"\"la funcion implementa el reinicio de un nivel, muestra mensaje, nivel y puntaje correspondientes\"\"\"\n os.system(\"cls\")\n global puntos, nivelEnJuego, nivel\n puntos = puntos + puntosDeNivel\n print(\"Reiniciar Nivel \", nivelEnJuego + 1)\n print(\"Puntos totales: \", puntos)\n print(\"Puntos de nivel: \", puntosDeNivel)\n input(\"Ingresar 's' para continuar \")\n estructuraDelJuego.desarrolloDelJuego(nivelEnJuego)\n\n\ndef abandonarJuego():\n \"\"\"la funcion implementa el abandono del juego, muestra mensaje, nivel y puntaje correspondientes\"\"\"\n global puntos, nivelEnJuego\n print(\"\")\n print(\"Juego abortado\")\n print(\"Nivel alcanzado \", (nivelEnJuego + 1))\n print(\"Puntaje final: \", puntos)\n print(\"\")\n input(\"Ingresar 's' para continuar \")\n estructuraDelJuego.mostrarMenuInicial()","sub_path":"modulosOperativos/dinamicaDelJuego/resultadosYEstados.py","file_name":"resultadosYEstados.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"111039196","text":"from cities import PLACE_DICT\r\nimport re\r\n\r\n\r\n\r\n# return values\r\nGOOD_SCORE = 'green'\r\nNEUTRAL_SCORE = 'yellow'\r\nBAD_SCORE = 'red'\r\nNO_ONES_TALKING = 'no-one is talking about this'\r\n##\r\nSAMPLE_SIZE_TOO_SMALL_CUTOFF = 20000 # number of people in the city per tweet, if greater than this, the tweets are insignificant\r\nDIFFERENCE_BAD_GOOD_TOO_SMALL = 5\r\nTOO_FEW_TWEETS = 20 #so few tweets altogether to say 'no one's talking about this'\r\n\r\n\r\nclass RateMyTown():\r\n # init with number of positive tweets, number of negatives and location - should be one from list\r\n def __init__(self, location, positive, negative):\r\n self.location = location\r\n self.positive = positive\r\n self.negative = negative\r\n\r\n def CalculateScore(self):\r\n if self.positive + self.negative <= TOO_FEW_TWEETS:\r\n return NO_ONES_TALKING\r\n\r\n population = PLACE_DICT[self.location]['pop']\r\n\r\n #first if there are really few tweets compared to population size, return neutral score - insufficient data\r\n if population / (self.positive + self.negative) > SAMPLE_SIZE_TOO_SMALL_CUTOFF:\r\n print('ratio of population to tweets too small: ', population / (self.positive + self.negative))\r\n return NO_ONES_TALKING\r\n else:\r\n # now, if the positive and negative tweets are near enough equal in number, return neutral\r\n if abs(self.positive - self.negative) <= DIFFERENCE_BAD_GOOD_TOO_SMALL:\r\n return NEUTRAL_SCORE\r\n else:\r\n if self.positive > self.negative:\r\n return GOOD_SCORE\r\n else:\r\n return BAD_SCORE\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n rateMyTown = RateMyTown('London',900,600)\r\n result = rateMyTown.CalculateScore()\r\n print('London ', result)\r\n","sub_path":"town_rater.py","file_name":"town_rater.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"500677553","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import RequestContext, loader\nfrom .models import Problem, Tag, News, Submission\n\n# form .models import User\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User, Permission\nfrom django.db import connection\nimport random\n\ndef index(request):\n problem_list = Problem.objects.order_by('id')[:10]\n User_list = User.objects.order_by('id')\n Tag_list = Tag.objects.order_by('id')\n News_list = News.objects.order_by('id')\n\n if not request.session.get('member_id', None):\n return HttpResponseRedirect('/polls/login')\n request.session['member_id'] = 0\n\n member_id = request.session['member_id']\n \n add_Tag = False;\n\n user = User.objects.get(id = member_id)\n if user is not None:\n add_Tag = user.has_perm(\"add_Tag\")\n\n context = {'problem_list': problem_list,\n 'User_list': User_list, 'Tag_list': Tag_list, \n 'member_id': member_id, 'News_list': News_list,\n 'add_Tag': add_Tag, 'top_rank_list': get_rank(0)}\n \n return render(request, 'polls/home.html', context)\n\n\ndef view_login(request):\n flag = False\n request.session['pro'] = 0\n request.session['member_id'] = 0\n\n logout(request)\n\n if request.method == 'POST':\n username = request.POST['name']\n password = request.POST['password']\n user = authenticate(username=username, password = password)\n user1 = user\n if user is not None:\n login(request, user)\n request.session['member_id'] = user.id\n return HttpResponseRedirect('/polls/user/' + str(user.id))\n \n else:\n flag = True;\n request.session['member_id'] = 0\n\n request.session['member_id'] = 0;\n context = {'flag': flag, 'member_id': 0, 'top_rank_list': get_rank(0)}\n return render(request, 'polls/login.html', context)\n\n\ndef problem(request, problem_id):\n member_id = request.session['member_id']\n problem = get_object_or_404(Problem, id = problem_id)\n top_rank_list = get_rank(0)\n request.session['pro'] = problem_id\n return render(request, 'polls/problem.html', {'problem': problem, 'member_id': member_id, 'top_rank_list':top_rank_list})\n\n\ndef problemN(request, problem_name):\n member_id = request.session['member_id']\n problem = get_object_or_404(Problem, problemName = problem_name)\n top_rank_list = get_rank(0)\n request.session['pro'] = problem.id\n return render(request, 'polls/problem.html', {'problem': problem, 'member_id': member_id, 'top_rank_list':top_rank_list})\n\n\ndef userN(request, user_name):\n user = get_object_or_404(User, username = user_name)\n member_id = request.session['member_id']\n top_rank_list = get_rank(0)\n return render(request, 'polls/user.html', {'user': user, 'member_id': member_id, 'top_rank_list': top_rank_list})\n\n\nimport datetime\nfrom django.utils.timezone import utc\nimport json\n\ndef user(request, user_id):\n result = get_rank(1)\n user = get_object_or_404(User, id = user_id)\n member_id = request.session['member_id']\n top_rank_list = get_rank(0)\n \n rank = 0\n for x in result:\n if x['userName'] == user.first_name:\n rank = x['rank_index']\n solved = x['solved']\n\n AC = Submission.objects.filter(user = user).filter(status = \"Accepted\").count()\n WA = Submission.objects.filter(user = user).filter(status = \"Wrong answer\").count()\n TLE = Submission.objects.filter(user = 
user).filter(status = \"Time limit exceeded\").count()\n \n s = Submission.objects.filter(user = user).filter(status = \"Accepted\")\n Try = Submission.objects.filter(user = user).count()\n\n data = [0] * 32\n for x in s:\n data[x.submittedDate.day] = data[x.submittedDate.day] + 1;\n\n Data = []\n for d in range(1, 10):\n xx = str(12) + \".\" + str(d)\n Data.append({'x': xx, 'y': data[d]})\n\n return render(request, 'polls/user.html', {'solved': solved, 'Try': Try, 'Data': Data, 'rank': rank, 'user': user, 'member_id': member_id, 'top_rank_list': top_rank_list, 'AC': AC, 'WA':WA, 'TLE':TLE})\n\n\ndef createTag(request):\n return render(request, 'polls/createTag.html', {})\n\n\ndef createTagAction(request):\n name = request.POST['TagName']\n a = Tag(tagName = name)\n a.save()\n\n Tag_list = Tag.objects.order_by('id')\n context = {'Tag_list': Tag_list, 'name': name}\n \n return render(request, 'polls/createdTagResult.html', context)\n\n\ndef createUser(request):\n return render(request, 'polls/createUser.html', {})\n\n\ndef createUserAction(request):\n fname = request.POST['firstName']\n lname = request.POST['lastName']\n pword = request.POST['password']\n e = request.POST['email']\n s = request.POST['sex']\n \n a = User(firstName = fname, lastName = lname, password = pword,\n email = e, sex = s)\n a.save()\n \n User_list = User.objects.order_by('id')\n context = {'User_list': User_list, 'fname': fname}\n \n return render(request, 'polls/createdUserResult.html', context)\n\n\ndef createProblem(request):\n return render(request, 'polls/createProblem.html', {})\n\n\ndef createProblemAction(request):\n name = request.POST['name']\n st = request.POST['statement']\n i = request.POST['input']\n o = request.POST['output']\n t = request.POST['timelimit']\n e = request.POST['extra']\n a = Problem(problemName = name, problemStatement = st, timelimit = float(t), input = i, output = o,\n extraInformation = e)\n a.save()\n \n problem_list = Problem.objects.order_by('id')\n context = {'Problem_list': problem_list, 'problemName': name}\n \n return render(request, 'polls/createdProblemResult.html', context)\n\n'''\nfor i in range(1, 100000):\n name = \"problem \" + str(i)\n st = \"statement\"\n i = \"input\"\n o = \"output\"\n t = 1.0\n e = \"extraInformation\"\n a = Problem(problemName = name, problemStatement = st, timelimit = float(t), input = i, output = o, extraInformation = e)\n a.save()\n\nfor i in range(2, 100):\n p = Problem.objects.get(id = random.randint(10, 100))\n u = User.objects.get(id = random.randint(2, 105))\n x = random.randint(1, 100)\n st = 'WA'\n if (x < 50): st = \"TLE\"\n if (x == 1): st = \"AC\"\n Submission(user = u, problem = p, status = st)\n Submission.save() \n'''\n\ndef get_rank(n):\n def dictfetchall(cursor): \n desc = cursor.description \n return [\n dict(zip([col[0] for col in desc], row)) \n for row in cursor.fetchall()\n ]\n\n cursor = connection.cursor()\n if n != 0:\n cursor.execute(\"SELECT * FROM rank1\")\n else:\n cursor.execute(\"SELECT * FROM rank1 LIMIT 10\")\n result = dictfetchall(cursor)\n return result\n\n\ndef rank(request):\n result = get_rank(1)\n\n if not request.session.get('member_id', None):\n request.session['member_id'] = 0\n member_id = request.session['member_id']\n\n context = {'rank_list': result, 'member_id': member_id}\n \n return render(request, 'polls/rank.html', context)\n\n\ndef submit(request):\n top_rank_list = get_rank(0)\n if not request.session.get('pro', None):\n request.session['pro'] = 0;\n return render(request, 'polls/submit.html', 
{'member_id': request.session['member_id'], 'top_rank_list': top_rank_list, 'pro': request.session['pro']})\n\nimport os\nimport decimal\nimport math\n\ndef submitAction(request):\n user = User.objects.get(id = request.session['member_id'])\n problemId = request.POST['problemId']\n\n problem = Problem.objects.get(id = problemId)\n l = request.POST['lang']\n code = request.POST['code']\n \n inn = problem.input\n out = problem.output\n with open(\"input.txt\", \"w\") as text_file:\n text_file.write(inn)\n with open(\"answer.txt\", \"w\") as text_file:\n text_file.write(out)\n\n with open(\"ab.cpp\", \"w\") as text_file:\n text_file.write(code)\n f = os.popen(\"python3 judge.py\")\n temp = f.read()\n flag = temp.split('\\n')[1]\n time = temp.split('\\n')[0]\n\n a = Submission(user = user, problem = problem, Language = l, status = flag, code = code, Time = time)\n a.save()\n\n a = Submission.objects.filter(problem = problem).filter(user = user).order_by('-id')[:1]\n a = a[0]\n with open(\"Sub/\" + str(a.id) + \".txt\", \"w\") as text_file:\n text_file.write(code)\n \n return HttpResponseRedirect('/polls/status/1')\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\ndef problems(request, page):\n problem_list = Problem.objects.order_by('id')\n paginator = Paginator(problem_list, 20)\n\n if not request.session.get('member_id', None):\n request.session['member_id'] = 0\n member_id = request.session['member_id']\n\n try:\n problem_list = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n problem_list = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n problem_list = paginator.page(paginator.num_pages)\n\n pTag = []\n solved = []\n iam = []\n if member_id != 0:\n user = User.objects.get(id = member_id)\n for x in problem_list:\n pTag.append(x.tag.all())\n solved.append(Submission.objects.filter(problem = x).filter(status = \"Accepted\").count())\n if member_id == 0:\n iam.append(0)\n elif member_id != 0:\n if Submission.objects.filter(problem = x).filter(status = \"Accepted\").filter(user = user).count() > 0:\n iam.append(1)\n elif Submission.objects.filter(problem = x).filter(user = user).count() > 0:\n iam.append(2)\n else :\n iam.append(0)\n\n\n pages = []\n for i in range(1, problem_list.paginator.num_pages + 1):\n if problem_list.paginator.num_pages == problem_list.number and i == problem_list.number - 1:\n pages.append(0) \n elif 1 == problem_list.number and i == problem_list.number + 2:\n pages.append(0)\n if i <= 2: \n pages.append(i)\n elif problem_list.paginator.num_pages - i <= 1:\n pages.append(i)\n elif abs(i - problem_list.number) <= 1:\n if problem_list.number - i == 1 and problem_list.number - 2 != 2:\n pages.append(0)\n pages.append(i)\n if i - problem_list.number == 1 and problem_list.number != problem_list.paginator.num_pages - 3:\n pages.append(0)\n\n list = zip(problem_list.object_list, pTag, solved, iam)\n return render(request, 'polls/problems.html', {'list': list, 'pTag': pTag, 'member_id': request.session['member_id'], 'top_rank_list': get_rank(0), \"problem_list\": problem_list, \"pages\": pages})\n\n\ndef tag(request, tag_id, page):\n t = Tag.objects.filter(id = tag_id)\n problem_list = Problem.objects.order_by('id').filter(tag = t)\n paginator = Paginator(problem_list, 20)\n\n if not request.session.get('member_id', None):\n request.session['member_id'] = 0\n member_id = request.session['member_id']\n\n try:\n problem_list = 
paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n problem_list = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n problem_list = paginator.page(paginator.num_pages)\n\n pTag = []\n iam = []\n solved = []\n if member_id != 0:\n user = User.objects.get(id = member_id)\n for x in problem_list:\n pTag.append(x.tag.all())\n solved.append(Submission.objects.filter(problem = x).filter(status = \"Accepted\").count())\n if member_id == 0:\n iam.append(0)\n elif member_id != 0:\n if Submission.objects.filter(problem = x).filter(status = \"Accepted\").filter(user = user).count() > 0:\n iam.append(1)\n elif Submission.objects.filter(problem = x).filter(user = user).count() > 0:\n iam.append(2)\n else :\n iam.append(0)\n\n pages = []\n for i in range(1, problem_list.paginator.num_pages + 1):\n if problem_list.paginator.num_pages == problem_list.number and i == problem_list.number - 1:\n pages.append(0) \n elif 1 == problem_list.number and i == problem_list.number + 2:\n pages.append(0)\n if i <= 2: \n pages.append(i)\n elif problem_list.paginator.num_pages - i <= 1:\n pages.append(i)\n elif abs(i - problem_list.number) <= 1:\n if problem_list.number - i == 1 and problem_list.number - 2 != 2:\n pages.append(0)\n pages.append(i)\n if i - problem_list.number == 1 and problem_list.number != problem_list.paginator.num_pages - 3:\n pages.append(0)\n\n list = zip(problem_list.object_list, pTag, solved, iam)\n return render(request, 'polls/tag.html', {'tag_id': tag_id, 'list': list, 'pTag': pTag, 'member_id': request.session['member_id'], 'top_rank_list': get_rank(0), \"problem_list\": problem_list, \"pages\": pages})\n\n\ndef level(request, l, page):\n problem_list = Problem.objects.filter(level = l)\n paginator = Paginator(problem_list, 20)\n\n if not request.session.get('member_id', None):\n request.session['member_id'] = 0\n member_id = request.session['member_id']\n\n try:\n problem_list = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n problem_list = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n problem_list = paginator.page(paginator.num_pages)\n\n pTag = []\n iam = []\n solved = []\n if member_id != 0:\n user = User.objects.get(id = member_id)\n for x in problem_list:\n pTag.append(x.tag.all())\n solved.append(Submission.objects.filter(problem = x).filter(status = \"Accepted\").count())\n if member_id == 0:\n iam.append(0)\n elif member_id != 0:\n if Submission.objects.filter(problem = x).filter(status = \"Accepted\").filter(user = user).count() > 0:\n iam.append(1)\n elif Submission.objects.filter(problem = x).filter(user = user).count() > 0:\n iam.append(2)\n else :\n iam.append(0)\n\n pages = []\n for i in range(1, problem_list.paginator.num_pages + 1):\n if problem_list.paginator.num_pages == problem_list.number and i == problem_list.number - 1:\n pages.append(0) \n elif 1 == problem_list.number and i == problem_list.number + 2:\n pages.append(0)\n if i <= 2: \n pages.append(i)\n elif problem_list.paginator.num_pages - i <= 1:\n pages.append(i)\n elif abs(i - problem_list.number) <= 1:\n if problem_list.number - i == 1 and problem_list.number - 2 != 2:\n pages.append(0)\n pages.append(i)\n if i - problem_list.number == 1 and problem_list.number != problem_list.paginator.num_pages - 3:\n pages.append(0)\n\n list = zip(problem_list.object_list, pTag, solved, iam)\n return render(request, 'polls/level.html', {'level_id': l, 'list': list, 'pTag': pTag, 'member_id': request.session['member_id'], 'top_rank_list': get_rank(0), \"problem_list\": problem_list, \"pages\": pages})\n\n\ndef pstatus(request, pid, page):\n x = Problem.objects.get(id = pid)\n status_list = Submission.objects.filter(problem = x).filter(status = \"Accepted\")\n paginator = Paginator(status_list, 20)\n\n if not request.session.get('member_id', None):\n request.session['member_id'] = 0\n member_id = request.session['member_id']\n\n try:\n status_list = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n status_list = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n status_list = paginator.page(paginator.num_pages)\n\n pages = []\n for i in range(1, status_list.paginator.num_pages + 1):\n if status_list.paginator.num_pages == status_list.number and i == status_list.number - 1:\n pages.append(0) \n elif 1 == status_list.number and i == status_list.number + 2:\n pages.append(0)\n if i <= 2: \n pages.append(i)\n elif status_list.paginator.num_pages - i <= 1:\n pages.append(i)\n elif abs(i - status_list.number) <= 1:\n if status_list.number - i == 1 and status_list.number - 2 != 2:\n pages.append(0)\n pages.append(i)\n if i - status_list.number == 1 and status_list.number != status_list.paginator.num_pages - 3:\n pages.append(0)\n \n return render(request, 'polls/pstatus.html', {'pid': pid, 'member_id': request.session['member_id'], 'top_rank_list': get_rank(0), \"status_list\": status_list, \"pages\": pages})\n\ndef status(request, page):\n status_list = Submission.objects.order_by('-id')\n paginator = Paginator(status_list, 10)\n\n if not request.session.get('member_id', None):\n request.session['member_id'] = 0\n member_id = request.session['member_id']\n\n try:\n status_list = paginator.page(page)\n except PageNotAnInteger:\n status_list = paginator.page(1)\n except EmptyPage:\n status_list = paginator.page(paginator.num_pages)\n\n pages = []\n for i in range(1, status_list.paginator.num_pages + 1):\n if status_list.paginator.num_pages == status_list.number and i == status_list.number - 1:\n pages.append(0) \n elif 1 == status_list.number and i == status_list.number + 2:\n pages.append(0)\n if i <= 2: \n pages.append(i)\n elif status_list.paginator.num_pages - i <= 1:\n pages.append(i)\n elif abs(i - status_list.number) <= 1:\n if status_list.number - i == 1 and status_list.number - 2 != 2:\n pages.append(0)\n pages.append(i)\n if i - status_list.number == 1 and status_list.number != status_list.paginator.num_pages - 3:\n pages.append(0)\n\n return render(request, 'polls/status.html', {'member_id': member_id, 'top_rank_list': get_rank(0), \"status_list\": status_list, \"pages\": pages})\n\n\ndef demo_piechart(request):\n \"\"\"\n pieChart page\n \"\"\"\n if not request.session.get('member_id', None):\n request.session['member_id'] = 0\n member_id = request.session['member_id']\n\n return render(request, 'polls/piechart.html', {'member_id': member_id})\n\n\nfrom django.contrib.auth.models import User, Group\nfrom rest_framework import viewsets\nfrom .serializers import UserSerializer, GroupSerializer, ProblemSerializer, SubmissionSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass ProblemViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows Problems to be viewed or edited.\n \"\"\"\n queryset = Problem.objects.all()\n serializer_class = ProblemSerializer\n\n\nclass SubmissionViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows Problems to be viewed or edited.\n \"\"\"\n queryset = Submission.objects.all()\n serializer_class = SubmissionSerializer 
","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"83569939","text":"import math\r\nimport numpy as np\r\nimport scipy.optimize as opt\r\nimport matplotlib.pyplot as plt\r\n\r\nclass rectangularSection:\r\n def __init__(self,b,h,steelHeights,barDiameters,barQuantities,step,units='mm'):\r\n allowableUnits = ['mm','m']\r\n if units not in allowableUnits:\r\n raise Exception(\"Only mm or m allowed as section size units\")\r\n elif units == 'm':\r\n self.b = b*1000\r\n self.h = h*1000\r\n self.steelHeights = np.array(steelHeights*1000)\r\n self.barDiameters = np.array(barDiameters*1000)\r\n self.barQuantities = np.array(barQuantities)\r\n self.steelAreas = np.array(barQuantities)*0.25*(np.array(barDiameters)*1000)**2*math.pi\r\n self.concreteHeights = np.arange(0,h*1000,step*1000)\r\n self.step = step*1000\r\n else:\r\n self.b = b\r\n self.h = h\r\n self.steelHeights = np.array(steelHeights)\r\n self.barDiameters = np.array(barDiameters)\r\n self.barQuantities = np.array(barQuantities)\r\n self.steelAreas = np.array(barQuantities)*0.25*np.array(barDiameters)**2*math.pi\r\n self.step = step\r\n self.concreteHeights = np.arange(0,h,step)\r\n\r\n def getConcreteForces(self,strains,b,concreteMaterial):\r\n stress = np.where(strains>0,0,np.where(strains\"\n print(\"Please specify a building id\")\n exit(1)\n\ndata_points = file_len('Temperature2015.csv')\n\n# open csv files\ntemp_file = open('Temperature2015.csv', 'r')\nis_class_file = open('IsClass2015.csv', 'r')\nis_wkend_file = open('IsWeekend2015.csv', 'r')\nquarter_file = open('QuarterOfDay.csv', 'r')\ncoefs_file = open('CoefsLinreg.csv', 'r')\n\n# get coefficients for the current building from the coef_file\ncoefs_reader = csv.reader(coefs_file, delimiter=',', quoting=csv.QUOTE_NONE)\nfor row in coefs_reader:\n if row[0].strip() == building_name:\n matrix_X = [float(row[1]), float(row[2]), float(row[3]), float(row[4]), float(row[5]), float(row[6]),\n float(row[7]), float(row[8])]\n break\n\n# set up B matrices\nmatrices_B = []\nfor x in range(0, data_points):\n q = [int(x) for x in quarter_file.readline().split(',')]\n tmp = [1, int(temp_file.readline()), int(is_class_file.readline()), int(is_wkend_file.readline()), q[0], q[1], q[2],\n q[3]]\n matrices_B.append(tmp)\n\n# calculate value and send it on\nfor B in matrices_B:\n val = 0.0\n for x in range(0, len(matrix_X)):\n val += matrix_X[x] * B[x]\n\n # print(val)\n sock.sendto(str(val), (udp_ip, udp_port,))\n time.sleep(0.5)\n","sub_path":"docker2/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"423780626","text":"#------------------------------------------------------------------\r\n#\r\n# Digital Tutors - Advanced PyQt for Maya\r\n# 2. Introducing QDialog and QMainWindow\r\n#\r\n#------------------------------------------------------------------\r\n\r\n# 모듈 임포트\r\nimport PySide2.QtCore as qc # 윈도우 핸들, 다른 위젯과의 시그널 \r\nimport PySide2.QtGui as qg # 그래픽 요소, 레이블.. 어쩌구 \r\nimport PySide2.QtWidgets as qw # 위젯들\r\n\r\n\r\n# 디알로그\r\ndialog = qw.QDialog()\r\ndialog.show()\r\n\r\n\r\n# 윈도우\r\nwindow = qw.QMainWindow()\r\nwindow.show()\r\n\r\n\r\n# 메뉴바\r\nmenubar = qw.QMenuBar()\r\nwindow = qw.QMainWindow()\r\nwindow.setMenuBar( menubar )\r\nwindow.show()\r\n\r\nmenubar.addAction('File')\r\nmenubar.addAction('Edit')\r\nmenubar.addSeparator()\r\nmenubar.addAction('Modify')\r\nmenubar.addAction('Create')\r\nmenubar.addAction('Display')\r\n\r\n\r\n# 툴바\r\ntoolbar = qw.QToolBar()\r\nwindow = qw.QMainWindow()\r\nwindow.addToolBar(toolbar)\r\nwindow.show()\r\n\r\ntoolbar.addAction('File')\r\ntoolbar.addAction('Edit')\r\ntoolbar.addSeparator()\r\ntoolbar.addAction('Modify')\r\ntoolbar.addAction('Create')\r\ntoolbar.addAction('Display')\r\n\r\n\r\n# 상태바\r\nstatusbar = qw.QStatusBar()\r\nwindow = qw.QMainWindow()\r\nwindow.setStatusBar(statusbar)\r\nwindow.show()\r\n\r\nstatusbar.showMessage('Loading...', 2000)\r\n\r\n\r\n# 디알로그, 타이틀\r\ndialog = qw.QDialog()\r\ndialog.setWindowTitle('Simple Dialog')\r\ndialog.show()\r\n\r\ndialog.setModal(False) # 작동 안함. 아래 클래스에 넣어서 사용 하면 작동함.. 왜그런지 모르겠음.\r\n\r\n\r\n# 디알로그, (제일 앞에 있게 다른 윈도우, 다른 프로그램 창들 보다..)\r\ndialog = qw.QDialog()\r\ndialog.setWindowFlags(qc.Qt.WindowStaysOnTopHint) # 다른 프로그램 창들보다 위에 있게됨.\r\ndialog.show()\r\n\r\n\r\n# 디알로그, 윈도우 타이틀, 고정 크기\r\ndialog = qw.QDialog()\r\ndialog.setWindowFlags(qc.Qt.WindowStaysOnTopHint)\r\ndialog.show()\r\n\r\ndialog.setWindowTitle('Simple Dialog')\r\ndialog.setModal(False)\r\ndialog.setFixedHeight(250)\r\ndialog.setFixedWidth(300)\r\n\r\n\r\n# 디알로그, 가로 고정, 상하크기 변형가능\r\ndialog = qw.QDialog()\r\ndialog.setWindowFlags(qc.Qt.WindowStaysOnTopHint)\r\ndialog.show()\r\n\r\n#dialog.setMinimumHeight(250)\r\ndialog.setMaximumHeight(250)\r\ndialog.setFixedWidth(300)\r\n\r\n\r\n# 클래스 사용\r\nclass SimpleUI(qw.QDialog):\r\n def __init__(self):\r\n qw.QDialog.__init__(self)\r\n\r\ndialog = SimpleUI()\r\ndialog.show()\r\n \r\n \r\n# 클래스 사용2\r\nclass SimpleUI(qw.QDialog):\r\n def __init__(self):\r\n qw.QDialog.__init__(self)\r\n \r\n self.setWindowFlags(qc.Qt.WindowStaysOnTopHint)\r\n \r\n self.setWindowTitle('Simple Dialog')\r\n self.setModal(True) # 모달이 이제서야 작동함.\r\n dialog.setMinimumHeight(250)\r\n self.setFixedWidth(300)\r\n\r\ndialog = SimpleUI()\r\ndialog.show()\r\n\r\n\r\n# 클래스 사용\r\nclass SimpleUI(qw.QDialog):\r\n def __init__(self):\r\n super(SimpleUI,self).__init__() # 초기화에 super사용\r\n \r\n self.setWindowFlags(qc.Qt.WindowStaysOnTopHint) \r\n self.setWindowTitle('Simple Dialog')\r\n self.setModal(True)\r\n dialog.setMinimumHeight(250)\r\n self.setFixedWidth(300)\r\n\r\ndialog = SimpleUI()\r\ndialog.show()\r\n\r\n#------------------------------------------------------------------\r\n#\r\n# Digital Tutors - Advanced PyQt for Maya\r\n# 3. 
Exploring layout\r\n#\r\n#------------------------------------------------------------------\r\n\r\n\r\n# HBoxLayout, VBoxLayout layouts\r\nclass SimpleUI(qw.QDialog):\r\n def __init__(self):\r\n super(SimpleUI,self).__init__()\r\n \r\n self.setWindowFlags(qc.Qt.WindowStaysOnTopHint) \r\n self.setWindowTitle('Simple Dialog')\r\n self.setModal(False)\r\n self.setMinimumSize(200,300)\r\n\r\ndialog = SimpleUI()\r\ndialog.show()\r\n\r\n#dialog.setLayout(qw.QVBoxLayout()) # Vbox layout\r\ndialog.setLayout(qw.QHBoxLayout()) # Hbox layout\r\n\r\nbtn1 = qw.QPushButton('btn1')\r\nbtn2 = qw.QPushButton('btn2')\r\nbtn3 = qw.QPushButton('btn3')\r\n\r\ndialog.layout().addWidget(btn1)\r\ndialog.layout().addWidget(btn2)\r\ndialog.layout().addWidget(btn3)\r\n\r\n\r\n# FormLayout layout\r\nclass SimpleUI(qw.QDialog):\r\n def __init__(self):\r\n super(SimpleUI,self).__init__() \r\n self.setWindowFlags(qc.Qt.WindowStaysOnTopHint) \r\n self.setWindowTitle('Simple Dialog')\r\ndialog = SimpleUI()\r\ndialog.show()\r\n\r\ndialog.setLayout(qw.QFormLayout()) # Form layout\r\nformlayout = dialog.layout()\r\n\r\nname_le = qw.QLineEdit()\r\nemail_le = qw.QLineEdit()\r\nage_le = qw.QSpinBox()\r\nbtn = qw.QPushButton('button')\r\n\r\nformlayout.addRow('Name:',name_le)\r\nformlayout.addRow('Email:',email_le)\r\nformlayout.addRow('Age:',age_le)\r\nformlayout.addWidget(btn)\r\n\r\n\r\n# GridLayout layout\r\nclass SimpleUI(qw.QDialog):\r\n def __init__(self):\r\n super(SimpleUI,self).__init__() \r\n self.setWindowFlags(qc.Qt.WindowStaysOnTopHint) \r\n self.setWindowTitle('Simple Dialog')\r\ndialog = SimpleUI()\r\ndialog.show()\r\n\r\ndialog.setLayout(qw.QGridLayout()) # Grid layout\r\nmyLayout = dialog.layout()\r\n\r\nfontName_lb = qw.QLabel('Font')\r\nfontStyle_lb = qw.QLabel('Font Style')\r\nfontSize_lb = qw.QLabel('Font Size')\r\n\r\nfontName_list = qw.QListWidget()\r\nfontName_list.addItem('Times')\r\nfontName_list.addItem('Helvetica')\r\nfontName_list.addItem('Courier')\r\nfontName_list.addItem('Palatino')\r\nfontName_list.addItem('Gill Sans')\r\n\r\nfontStyle_list = qw.QListWidget()\r\nfontStyle_list.addItem('Roman')\r\nfontStyle_list.addItem('Italic')\r\nfontStyle_list.addItem('Oblique')\r\n\r\nfontSize_list = qw.QListWidget()\r\nfor i in range(10,30,2):\r\n fontSize_list.addItem( str(i) )\r\n \r\nmyLayout.addWidget( fontName_lb, 0,0 )\r\nmyLayout.addWidget( fontName_list, 1,0 )\r\nmyLayout.addWidget( fontStyle_lb, 0,1 )\r\nmyLayout.addWidget( fontStyle_list, 1,1 )\r\nmyLayout.addWidget( fontSize_lb, 0,2 )\r\nmyLayout.addWidget( fontSize_list, 1,2 )\r\n\r\n\r\n# StackedLayout layout\r\nfrom functools import partial\r\n\r\nclass SimpleUI(qw.QDialog):\r\n def __init__(self):\r\n super(SimpleUI,self).__init__() \r\n self.setWindowFlags(qc.Qt.WindowStaysOnTopHint) \r\n self.setWindowTitle('Simple Dialog')\r\ndialog = SimpleUI()\r\ndialog.show()\r\n\r\ndialog.setLayout(qw.QVBoxLayout())\r\ntopLayout = dialog.layout()\r\n\r\nstacked_lay = qw.QStackedLayout()\r\ntopLayout.addLayout(stacked_lay)\r\n\r\nhbox_lay = qw.QHBoxLayout()\r\nlay_btn1 = qw.QPushButton('Layout1')\r\nlay_btn2 = qw.QPushButton('Layout2')\r\nlay_btn3 = qw.QPushButton('Layout3')\r\nlay_btn4 = qw.QPushButton('Layout4')\r\nhbox_lay.addWidget(lay_btn1)\r\nhbox_lay.addWidget(lay_btn2)\r\nhbox_lay.addWidget(lay_btn3)\r\nhbox_lay.addWidget(lay_btn4)\r\ntopLayout.addLayout( hbox_lay )\r\n\r\n\r\nvbox_widget = qw.QWidget()\r\nvbox_lay = qw.QVBoxLayout()\r\nvbox_widget.setLayout(vbox_lay)\r\nvbox_btn1 = qw.QPushButton('Layout1')\r\nvbox_btn2 = 
qw.QPushButton('Layout2')\r\nvbox_btn3 = qw.QPushButton('Layout3')\r\nvbox_btn4 = qw.QPushButton('Layout4')\r\nvbox_lay.addWidget(vbox_btn1)\r\nvbox_lay.addWidget(vbox_btn2)\r\nvbox_lay.addWidget(vbox_btn3)\r\nvbox_lay.addWidget(vbox_btn4)\r\n\r\n\r\nhbox_widget = qw.QWidget()\r\nhbox_lay = qw.QHBoxLayout()\r\nhbox_widget.setLayout(hbox_lay)\r\nhbox_btn1 = qw.QPushButton('Layout1')\r\nhbox_btn2 = qw.QPushButton('Layout2')\r\nhbox_btn3 = qw.QPushButton('Layout3')\r\nhbox_btn4 = qw.QPushButton('Layout4')\r\nhbox_lay.addWidget(hbox_btn1)\r\nhbox_lay.addWidget(hbox_btn2)\r\nhbox_lay.addWidget(hbox_btn3)\r\nhbox_lay.addWidget(hbox_btn4)\r\n\r\n\r\nform_widget = qw.QWidget()\r\nform_lay = qw.QFormLayout()\r\nform_widget.setLayout( form_lay )\r\nform_le1 = qw.QLineEdit()\r\nform_le2 = qw.QLineEdit()\r\nform_le3 = qw.QSpinBox()\r\nform_btn = qw.QPushButton('button')\r\nform_lay.addRow('Name:',form_le1)\r\nform_lay.addRow('Email:',form_le2)\r\nform_lay.addRow('Age:',form_le3)\r\nform_lay.addWidget(form_btn) \r\n\r\n\r\ngrid_widget = qw.QWidget()\r\ngrid_layout = qw.QGridLayout()\r\ngrid_widget.setLayout( grid_layout )\r\ngrid_lb1 = qw.QLabel('Font')\r\ngrid_lb2 = qw.QLabel('Font Style')\r\ngrid_lb3 = qw.QLabel('Font Size')\r\ngrid_wg1 = qw.QListWidget()\r\ngrid_wg1.addItem('Times')\r\ngrid_wg1.addItem('Helvetica')\r\ngrid_wg1.addItem('Courier')\r\ngrid_wg1.addItem('Palatino')\r\ngrid_wg1.addItem('Gill Sans')\r\ngrid_wg2 = qw.QListWidget()\r\ngrid_wg2.addItem('Roman')\r\ngrid_wg2.addItem('Italic')\r\ngrid_wg2.addItem('Oblique')\r\ngrid_wg3 = qw.QListWidget()\r\nfor i in range(10,30,2):\r\n grid_wg3.addItem( str(i) ) \r\ngrid_layout.addWidget( grid_lb1, 0,0 )\r\ngrid_layout.addWidget( grid_wg1, 1,0 )\r\ngrid_layout.addWidget( grid_lb2, 0,1 )\r\ngrid_layout.addWidget( grid_wg2, 1,1 )\r\ngrid_layout.addWidget( grid_lb3, 0,2 )\r\ngrid_layout.addWidget( grid_wg3, 1,2 )\r\n\r\n\r\nstacked_lay.addWidget( vbox_widget )\r\nstacked_lay.addWidget( hbox_widget )\r\nstacked_lay.addWidget( form_widget )\r\nstacked_lay.addWidget( grid_widget )\r\n\r\n\r\nlay_btn1.clicked.connect( partial(stacked_lay.setCurrentIndex, 0) )\r\nlay_btn2.clicked.connect( partial(stacked_lay.setCurrentIndex, 1) )\r\nlay_btn3.clicked.connect( partial(stacked_lay.setCurrentIndex, 2) )\r\nlay_btn4.clicked.connect( partial(stacked_lay.setCurrentIndex, 3) )\r\n\r\n\r\n\r\n#------------------------------------------------------------------\r\n#\r\n# Digital Tutors - Advanced PyQt for Maya\r\n# 4. 
Modifying layout\r\n#\r\n#------------------------------------------------------------------\r\n\r\nclass SimpleUI(qw.QDialog):\r\n def __init__(self):\r\n super(SimpleUI,self).__init__() \r\n self.setWindowFlags(qc.Qt.WindowStaysOnTopHint) \r\n self.setWindowTitle('Simple Dialog')\r\ndialog = SimpleUI()\r\ndialog.show()\r\n\r\ntop_layout = qw.QVBoxLayout()\r\ndialog.setLayout(top_layout)\r\n\r\ntop_frame = qw.QFrame()\r\nmid_frame = qw.QFrame()\r\nbtm_frame = qw.QFrame()\r\ntop_frame.setFrameStyle( qw.QFrame.Panel | qw.QFrame.Raised )\r\nmid_frame.setFrameStyle( qw.QFrame.Panel | qw.QFrame.Raised )\r\nbtm_frame.setFrameStyle( qw.QFrame.Panel | qw.QFrame.Raised )\r\n\r\ntop_layout.addWidget(top_frame)\r\ntop_layout.addWidget(mid_frame)\r\ntop_layout.addWidget(btm_frame)\r\ntop_layout.setContentsMargins(5,5,5,5)\r\ntop_layout.setSpacing(5)\r\n\r\nmid_layout = qw.QHBoxLayout()\r\nmid_frame.setLayout( mid_layout )\r\nmid_btn1 = qw.QPushButton('1')\r\nmid_btn2 = qw.QPushButton('2')\r\nmid_btn3 = qw.QPushButton('3')\r\nmid_btn4 = qw.QPushButton('4')\r\nmid_btn5 = qw.QPushButton('5')\r\nmid_layout.addWidget(mid_btn1)\r\nmid_layout.addWidget(mid_btn2)\r\nmid_layout.addWidget(mid_btn3)\r\nmid_layout.addWidget(mid_btn4)\r\nmid_layout.addWidget(mid_btn5)\r\n\r\nmid_layout.setContentsMargins(5,5,5,5)\r\nmid_layout.setSpacing(2)\r\nmid_layout.setAlignment(qc.Qt.AlignTop)\r\n\r\nmid_frame.setSizePolicy( qw.QSizePolicy.Minimum, qw.QSizePolicy.Maximum )\r\n\r\n\r\n\r\n#------------------------------------------------------------------\r\n#\r\n# Digital Tutors - Advanced PyQt for Maya\r\n# 5. Using standard widgets\r\n#\r\n#------------------------------------------------------------------\r\n","sub_path":"pyside/pyside2_study.py","file_name":"pyside2_study.py","file_ext":"py","file_size_in_byte":10846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
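The StackedLayout example in the tutorial record above runs inside Maya, which already hosts a Qt application. For trying the same switching idiom outside Maya — partial(stacked_lay.setCurrentIndex, i) wired to each button — a self-contained sketch, assuming a plain PySide2 install:

from functools import partial
import PySide2.QtWidgets as qw

app = qw.QApplication([])

dialog = qw.QDialog()
dialog.setWindowTitle('Stacked pages')
dialog.setLayout(qw.QVBoxLayout())

stack = qw.QStackedLayout()
buttons = qw.QHBoxLayout()
dialog.layout().addLayout(stack)
dialog.layout().addLayout(buttons)

for i in range(3):
    stack.addWidget(qw.QLabel('Page %d' % (i + 1)))
    btn = qw.QPushButton('Page %d' % (i + 1))
    btn.clicked.connect(partial(stack.setCurrentIndex, i))  # same idiom as the tutorial
    buttons.addWidget(btn)

dialog.show()
app.exec_()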
+{"seq_id":"107253686","text":"import time\nimport server_pb2\n\nfrom grma.server.base import ServerBase\n\nONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\nclass ServiceImpl(server_pb2.BetaSimpleServiceServicer):\n def hello(self, request, context):\n say = request.say\n return server_pb2.HelloResponse(reply='you said: %s' % say)\n\n Hello = hello\n\n\nclass Server(ServerBase):\n def __init__(self):\n server = server_pb2.beta_create_SimpleService_server(\n ServiceImpl()\n )\n self.server = server\n self.started = False\n\n def bind(self, host, port, private_key_path='', certificate_chain_path=''):\n # return 0 if cannot binded\n r = self.server.add_insecure_port('%s:%s' % (host, port))\n return r\n\n def start(self):\n \"\"\"start server\"\"\"\n self.server.start()\n self.started = True\n try:\n while self.started:\n time.sleep(ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n self.stop()\n\n def stop(self, grace=0):\n self.server.stop(0)\n self.started = False\n\n\n# entry point of grma\napp = Server()\n","sub_path":"examples/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"561272497","text":"import os\nimport shutil\nimport util_fasta, util_caracteristicas\nfrom sklearn.externals.joblib import Parallel, delayed, dump, load\nimport hashlib\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import SVC\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.model_selection import LeaveOneGroupOut, GridSearchCV, StratifiedKFold\nfrom sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support, precision_recall_curve, average_precision_score, roc_curve, roc_auc_score\nimport hashlib\nimport util_fasta\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import shuffle\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.constraints import maxnorm\nfrom keras.utils import to_categorical\n\nclass Tesis2():\n def __init__(self, carpeta_base=\".\", n_jobs=-1, verbose=0, tuned_parameters=[{'svc__kernel': ['rbf'], 'svc__gamma': [1e-3], 'svc__C': [0.1,0.5,0.9,2]}], score = ['accuracy','precision','recall']):\n self.carpeta_base = carpeta_base\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.tuned_parameters = tuned_parameters\n self.score = score\n self.carpeta_data_base = self.carpeta_base + \"/data\"\n self.carpeta_fold_base = self.carpeta_base + \"/folds\"\n self.carpeta_modelo_base = self.carpeta_base + \"/modelo_final\"\n if not os.path.isdir(self.carpeta_base):\n os.mkdir(self.carpeta_base)\n self.diamond_db = \"./feature_engine/Diamond_BD/uniprot-viridiplantae-reviewed.dmnd\"\n self.modelo_final_generado = False\n self.modelo_referencial_generado = False\n \n def generar_modelo_final(self):\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"*************** Generando llaves ****************\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.generar_llaves_clases()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"***************** Armando folds *****************\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.armar_folds()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"***** Generando modelo cpat para cada fold ******\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.generar_cpats_de_folds()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"*** Ejecutando cpat y diamond sobre los folds ***\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.ejecutar_cpat_diamond_folds()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"************* Serializando features *************\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.generar_features_folds()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"************ Generando modelo final *************\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.entrenar_modelo_final()\n self.modelo_final_generado 
= True\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"******** Limpiando archivos intermedios *********\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.limpiar_archivos_intermedios()\n if (self.verbose > 1):\n print(\"*************************************************\")\n print(\"************* Mostrando resultados **************\")\n print(\"*************************************************\")\n self.mostrar_resultados()\n \n def generar_modelo_final_keras(self):\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"*************** Generando llaves ****************\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.generar_llaves_clases()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"***************** Armando folds *****************\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.armar_folds()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"***** Generando modelo cpat para cada fold ******\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.generar_cpats_de_folds()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"*** Ejecutando cpat y diamond sobre los folds ***\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.ejecutar_cpat_diamond_folds()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"************* Serializando features *************\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.generar_features_folds()\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"************ Generando modelo final *************\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.entrenar_modelo_final_keras()\n self.modelo_final_generado = True\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"******** Limpiando archivos intermedios *********\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.limpiar_archivos_intermedios()\n if (self.verbose > 1):\n print(\"*************************************************\")\n print(\"************* Mostrando resultados **************\")\n print(\"*************************************************\")\n self.mostrar_resultados()\n \n def generar_modelos_referenciales(self):\n if (self.verbose > 1): print(\"*************************************************\")\n if (self.verbose > 1): print(\"******* Generando modelos referenciales *********\")\n if (self.verbose > 1): print(\"*************************************************\")\n self.entrenar_modelos_referenciales()\n self.modelo_referencial_generado = True\n if (self.modelo_final_generado and self.verbose > 1):\n print(\"*************************************************\")\n print(\"************* Mostrando resultados **************\")\n print(\"*************************************************\")\n 
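# comparison output is only shown when the final model also exists (guarded by modelo_final_generado above)\n            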
self.mostrar_resultados_referencial_vs_final()\n\n    def carpeta_data(self):\n        return self.carpeta_data_base\n\n    def carpeta_fold(self):\n        return self.carpeta_fold_base\n\n    def carpeta_modelo(self):\n        return self.carpeta_modelo_base\n\n    def folder_clase(self, num_clase):\n        return self.carpeta_data() + \"/clase_\" + str(num_clase)\n\n    def archivo_clase(self, num_clase, tipo):\n        return self.folder_clase(num_clase) + \"/\" + tipo + \".fa\"\n\n    def obtener_num_clases(self):\n        num_clases = 0\n        if not os.path.isfile(self.carpeta_base + \"/num_clases.bin\"):\n            while os.path.isdir(self.folder_clase(num_clases + 1)):\n                num_clases = num_clases + 1\n            dump(num_clases, self.carpeta_base + \"/num_clases.bin\")\n        num_clases = load(self.carpeta_base + \"/num_clases.bin\")\n        return num_clases\n\n    def iterador_clases(self):\n        return range(1, self.obtener_num_clases() + 1)\n\n    def obtener_primera_secuencia(self, num_clase):\n        secuencias = util_fasta.leer_fasta(self.archivo_clase(num_clase, \"lncRNA\"), 1)\n        return list(secuencias.keys())[0]\n\n    def obtener_todas_las_secuencias(self):\n        return {num_clase : self.obtener_primera_secuencia(num_clase) for num_clase in self.iterador_clases()}\n\n    def generar_llaves_clases(self):\n        secuencias = self.obtener_todas_las_secuencias()\n        llaves = {num_clase : \"\" for num_clase in secuencias.keys()}\n        llaves[0] = \"\" # key zero corresponds to the whole universe (all classes combined)\n        for i_clase in self.iterador_clases():\n            llaves[0] += secuencias[i_clase]\n            for j_clase in self.iterador_clases():\n                if (i_clase != j_clase):\n                    llaves[j_clase] += secuencias[i_clase]\n        llaves[0] = hashlib.sha224(llaves[0].encode()).hexdigest()\n        for i_clase in self.iterador_clases():\n            llaves[i_clase] = hashlib.sha224(llaves[i_clase].encode()).hexdigest()\n        dump(llaves, self.carpeta_base + \"/llaves_clases.bin\")\n\n    def obtener_llaves_clases(self):\n        return load(self.carpeta_base + \"/llaves_clases.bin\")\n\n    def carpeta_fold_clase(self, llave):\n        return self.carpeta_fold() + \"/fold_clase_\" + str(llave)\n\n    def archivo_fold_clase(self, llave, tipoTrainTest, tipoRNA):\n        return self.carpeta_fold_clase(llave) + \"/\" + tipoTrainTest + \"/\" + tipoRNA + \".fa\"\n\n    def armar_fold_final(self, tipo):\n        llave = self.obtener_llaves_clases()[0]\n        with open(self.archivo_fold_clase(llave, \"train\", tipo), \"w+\") as outfile:\n            for num_clase in self.iterador_clases():\n                with open(self.archivo_clase(num_clase, tipo), \"r\") as infile:\n                    for inline in infile:\n                        outfile.write(inline)\n\n    def armar_fold_clase(self, num_clase):\n        llave = self.obtener_llaves_clases()[num_clase]\n        os.mkdir(self.carpeta_fold_clase(llave))\n        os.mkdir(self.carpeta_fold_clase(llave) + \"/train\")\n        for tipo in [\"lncRNA\", \"PCT\", \"CDS\"]:\n            with open(self.archivo_fold_clase(llave, \"train\", tipo), \"w+\") as outfile:\n                for j_num_clase in self.iterador_clases():\n                    if num_clase != j_num_clase:\n                        with open(self.archivo_clase(j_num_clase, tipo)) as infile:\n                            for inline in infile:\n                                outfile.write(inline)\n        os.mkdir(self.carpeta_fold_clase(llave) + \"/test\")\n        for tipo in [\"lncRNA\", \"PCT\"]:\n            with open(self.archivo_fold_clase(llave, \"test\", tipo), \"w+\") as outfile:\n                with open(self.archivo_clase(num_clase, tipo)) as infile:\n                    for inline in infile:\n                        outfile.write(inline)\n\n    def armar_folds(self):\n        if os.path.isdir(self.carpeta_fold()):\n            shutil.rmtree(self.carpeta_fold())\n        os.mkdir(self.carpeta_fold())\n        llave = self.obtener_llaves_clases()[0]\n        if not os.path.isdir(self.carpeta_fold_clase(llave)):\n            
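# defensive check: the folds tree was just recreated above, so this directory should not exist yet\n            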
os.mkdir(self.carpeta_fold_clase(llave))\n if not os.path.isdir(self.carpeta_fold_clase(llave) + \"/train\"):\n os.mkdir(self.carpeta_fold_clase(llave) + \"/train\")\n\n Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(delayed(wrapper_armar_fold_final)(self, tipo) for tipo in [\"lncRNA\", \"PCT\", \"CDS\"])\n Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(delayed(wrapper_armar_fold_clase)(self, num_clase) for num_clase in self.iterador_clases())\n \n def carpeta_fold_cpat(self, llave):\n return self.carpeta_fold_clase(llave) + \"/cpat\"\n\n def generar_cpat_fold(self, num_clase):\n llave = self.obtener_llaves_clases()[num_clase]\n archivo_lncRNA = self.archivo_fold_clase(llave, \"train\", \"lncRNA\")\n archivo_PCT = self.archivo_fold_clase(llave, \"train\", \"PCT\")\n archivo_CDS = self.archivo_fold_clase(llave, \"train\", \"CDS\")\n carpeta_cpat = self.carpeta_fold_cpat(llave)\n if not os.path.isdir(carpeta_cpat):\n os.mkdir(carpeta_cpat)\n util_caracteristicas.generar_modelo_CPAT(archivo_lncRNA, archivo_PCT, archivo_CDS, carpeta_cpat)\n\n def generar_cpat_final(self):\n llave = self.obtener_llaves_clases()[0]\n archivo_lncRNA = self.archivo_fold_clase(llave, \"train\", \"lncRNA\")\n archivo_PCT = self.archivo_fold_clase(llave, \"train\", \"PCT\")\n archivo_CDS = self.archivo_fold_clase(llave, \"train\", \"CDS\")\n carpeta_cpat = self.carpeta_fold_cpat(llave)\n if not os.path.isdir(carpeta_cpat):\n os.mkdir(carpeta_cpat)\n util_caracteristicas.generar_modelo_CPAT(archivo_lncRNA, archivo_PCT, archivo_CDS, carpeta_cpat)\n\n def limpieza_archivos_CDS(self, num_clase):\n llave = self.obtener_llaves_clases()[num_clase]\n os.remove(self.archivo_fold_clase(llave, \"train\", \"CDS\"))\n \n def generar_cpats_de_folds(self):\n self.generar_cpat_final()\n Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(delayed(wrapper_generar_cpat_fold)(self, num_clase) for num_clase in self.iterador_clases())\n self.limpieza_archivos_CDS(0)\n Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(delayed(wrapper_limpieza_archivos_CDS)(self, num_clase) for num_clase in self.iterador_clases())\n \n def ejecutar_cpat_fold(self, num_clase):\n llave = self.obtener_llaves_clases()[num_clase]\n archivo_lncRNA = self.archivo_fold_clase(llave, \"train\", \"lncRNA\")\n archivo_PCT = self.archivo_fold_clase(llave, \"train\", \"PCT\")\n carpeta_cpat = self.carpeta_fold_cpat(llave)\n util_caracteristicas.ejecutar_cpat(archivo_lncRNA, carpeta_cpat, archivo_lncRNA.replace(\".fa\", \".cpat\"))\n os.remove(archivo_lncRNA.replace(\".fa\", \".cpat\") + \".dat\")\n os.remove(archivo_lncRNA.replace(\".fa\", \".cpat\") + \".r\")\n util_caracteristicas.ejecutar_cpat(archivo_PCT, carpeta_cpat, archivo_PCT.replace(\".fa\", \".cpat\"))\n os.remove(archivo_PCT.replace(\".fa\", \".cpat\") + \".dat\")\n os.remove(archivo_PCT.replace(\".fa\", \".cpat\") + \".r\")\n\n def ejecutar_cpat_diamond_final(self):\n llave = self.obtener_llaves_clases()[0]\n archivo_lncRNA = self.archivo_fold_clase(llave, \"train\", \"lncRNA\")\n archivo_PCT = self.archivo_fold_clase(llave, \"train\", \"PCT\")\n diamond_db = self.diamond_db\n carpeta_cpat = self.carpeta_fold_cpat(llave)\n util_caracteristicas.ejecutar_diamond(archivo_lncRNA, diamond_db, archivo_lncRNA.replace(\".fa\", \".dmnd\"))\n util_caracteristicas.ejecutar_diamond(archivo_PCT, diamond_db, archivo_PCT.replace(\".fa\", \".dmnd\"))\n util_caracteristicas.ejecutar_cpat(archivo_lncRNA, carpeta_cpat, archivo_lncRNA.replace(\".fa\", \".cpat\"))\n os.remove(archivo_lncRNA.replace(\".fa\", \".cpat\") + 
\".dat\")\n os.remove(archivo_lncRNA.replace(\".fa\", \".cpat\") + \".r\")\n util_caracteristicas.ejecutar_cpat(archivo_PCT, carpeta_cpat, archivo_PCT.replace(\".fa\", \".cpat\"))\n os.remove(archivo_PCT.replace(\".fa\", \".cpat\") + \".dat\")\n os.remove(archivo_PCT.replace(\".fa\", \".cpat\") + \".r\")\n \n def ejecutar_cpat_diamond_folds(self):\n self.ejecutar_cpat_diamond_final()\n Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(delayed(wrapper_ejecutar_cpat_fold)(self, num_clase) for num_clase in self.iterador_clases())\n \n def archivo_features_clase(self, llave, tipoTrainTest, tipoRNA):\n return self.archivo_fold_clase(llave, tipoTrainTest, tipoRNA).replace(\".fa\", \".ft\")\n\n def generar_features_fold(self, num_clase):\n llave = self.obtener_llaves_clases()[num_clase]\n features_base_lncRNA = self.archivo_features_clase(self.obtener_llaves_clases()[0], \"train\", \"lncRNA\")\n features_base_PCT = self.archivo_features_clase(self.obtener_llaves_clases()[0], \"train\", \"PCT\")\n for tipo in [\"train\", \"test\"]:\n archivo_lncRNA = self.archivo_fold_clase(llave, tipo, \"lncRNA\")\n archivo_PCT = self.archivo_fold_clase(llave, tipo, \"PCT\")\n archivo_cpat_lncRNA = self.archivo_fold_clase(llave, \"train\", \"lncRNA\").replace(\".fa\", \".cpat\")\n archivo_cpat_PCT = self.archivo_fold_clase(llave, \"train\", \"PCT\").replace(\".fa\", \".cpat\")\n salida_lncRNA = self.archivo_features_clase(llave, tipo, \"lncRNA\")\n salida_PCT = self.archivo_features_clase(llave, tipo, \"PCT\")\n util_caracteristicas.generar_features(archivo_lncRNA, features_base_lncRNA, archivo_cpat_lncRNA, salida_lncRNA)\n util_caracteristicas.generar_features(archivo_PCT, features_base_PCT, archivo_cpat_PCT, salida_PCT)\n\n def generar_features_final(self):\n llave = self.obtener_llaves_clases()[0]\n archivo_lncRNA = self.archivo_fold_clase(llave, \"train\", \"lncRNA\")\n archivo_PCT = self.archivo_fold_clase(llave, \"train\", \"PCT\")\n salida_lncRNA = self.archivo_features_clase(llave, \"train\", \"lncRNA\")\n salida_PCT = self.archivo_features_clase(llave, \"train\", \"PCT\")\n util_caracteristicas.generar_features_base(archivo_lncRNA, archivo_lncRNA.replace(\".fa\", \".cpat\"), archivo_lncRNA.replace(\".fa\", \".dmnd\"), salida_lncRNA)\n util_caracteristicas.generar_features_base(archivo_PCT, archivo_PCT.replace(\".fa\", \".cpat\"), archivo_PCT.replace(\".fa\", \".dmnd\"), salida_PCT)\n \n def generar_features_folds(self):\n self.generar_features_final()\n Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(delayed(wrapper_generar_features_fold)(self, num_clase) for num_clase in self.iterador_clases())\n \n def obtener_data_entrenamiento(self):\n llave = self.obtener_llaves_clases()[0]\n codigos_lncRNA = util_fasta.leer_fasta_list(self.archivo_fold_clase(llave, \"train\", \"lncRNA\"))\n codigos_PCT = util_fasta.leer_fasta_list(self.archivo_fold_clase(llave, \"train\", \"PCT\"))\n\n codigos_lncRNA = [(x[0],\"\") for x in codigos_lncRNA]\n codigos_PCT = [(x[0],\"\") for x in codigos_PCT]\n cantidad_transcritos = len(codigos_lncRNA)//self.obtener_num_clases()\n\n y = ([1] * len(codigos_lncRNA)) + ([0] * len(codigos_PCT))\n groups = list()\n for _ in range(2):\n for num_clase in self.iterador_clases():\n groups += ([num_clase] * (cantidad_transcritos))\n return codigos_lncRNA + codigos_PCT, y, groups, cantidad_transcritos\n \n def obtener_data_entrenamiento_keras(self):\n llave = self.obtener_llaves_clases()[0]\n codigos_lncRNA = util_fasta.leer_fasta_list(self.archivo_fold_clase(llave, \"train\", 
\"lncRNA\"))\n codigos_PCT = util_fasta.leer_fasta_list(self.archivo_fold_clase(llave, \"train\", \"PCT\"))\n\n codigos_lncRNA = [(x[0],\"\") for x in codigos_lncRNA]\n codigos_PCT = [(x[0],\"\") for x in codigos_PCT]\n cantidad_transcritos = len(codigos_lncRNA)//self.obtener_num_clases()\n\n y = ([1] * len(codigos_lncRNA)) + ([0] * len(codigos_PCT))\n groups = list()\n for _ in range(2):\n for num_clase in self.iterador_clases():\n groups += ([num_clase] * (cantidad_transcritos))\n x = codigos_lncRNA + codigos_PCT\n \n reserva_x = list()\n reserva_y = list()\n reserva_groups = list()\n iremove = list()\n num_transcritos = len(x)\n num_transcritos_por_grupo = cantidad_transcritos\n for i in range(num_transcritos//(num_transcritos_por_grupo*2)):\n reserva_x.append(x[i * num_transcritos_por_grupo])\n reserva_y.append(y[i * num_transcritos_por_grupo])\n reserva_groups.append(groups[i * num_transcritos_por_grupo])\n iremove.insert(0, i * num_transcritos_por_grupo)\n for i in iremove:\n del x[i]\n del y[i]\n del groups[i]\n x, y, groups = shuffle(x, y, groups, random_state=7)\n for i in range(num_transcritos//(num_transcritos_por_grupo*2)):\n x.insert(i, reserva_x[i])\n y.insert(i, reserva_y[i])\n groups.insert(i, reserva_groups[i])\n \n return x, y, groups, cantidad_transcritos\n\n def entrenar_modelo_final(self):\n if os.path.isdir(self.carpeta_modelo()):\n shutil.rmtree(self.carpeta_modelo())\n os.mkdir(self.carpeta_modelo())\n X_train, y_train, groups, cantidad_transcritos = self.obtener_data_entrenamiento()\n svm_pipeline = Pipeline(steps=[('features', GeneradorFeatures(self, cantidad_transcritos, self.obtener_num_clases())), ('scaler', RobustScaler()), ('svc', SVC())])\n logo = LeaveOneGroupOut()\n clf = GridSearchCV(svm_pipeline, self.tuned_parameters, cv=logo, scoring=self.score, n_jobs=self.n_jobs, refit=\"accuracy\", return_train_score = True, verbose=self.verbose)\n clf.fit(X_train, y_train, groups) #requerido por LeaveOneGroupOut\n resultado = {\n \"accuracy\" : clf.cv_results_['mean_test_accuracy'][clf.best_index_],\n \"precision\" : clf.cv_results_['mean_test_precision'][clf.best_index_],\n \"recall\" : clf.cv_results_['mean_test_recall'][clf.best_index_]\n }\n dump(resultado, self.carpeta_modelo() + \"/resultado.bin\")\n dump(clf.best_params_, self.carpeta_modelo() + \"/params.bin\")\n dump(clf.cv_results_, self.carpeta_modelo() + \"/cv_results.bin\")\n dump(clf.best_estimator_, self.carpeta_modelo() + \"/modelo.plk\")\n\n def entrenar_modelo_final_keras(self):\n if os.path.isdir(self.carpeta_modelo()):\n shutil.rmtree(self.carpeta_modelo())\n os.mkdir(self.carpeta_modelo())\n X_train, y_train, groups, cantidad_transcritos = self.obtener_data_entrenamiento_keras()\n #X_train, y_train, groups = shuffle(X_train, y_train, groups, random_state=7)\n keras_pipeline = Pipeline(steps=[('features', GeneradorFeaturesKeras(self, cantidad_transcritos, self.obtener_num_clases())), ('scaler', RobustScaler()), ('keras', KerasClassifier(build_fn=crear_modelo_keras, verbose=0))])\n logo = LeaveOneGroupOut()\n clf = GridSearchCV(keras_pipeline, self.tuned_parameters, cv=logo, scoring=self.score, n_jobs=self.n_jobs, refit=\"accuracy\", return_train_score = True, verbose=self.verbose)\n \n clf.fit(X_train, y_train, groups) #requerido por LeaveOneGroupOut\n resultado = {\n \"accuracy\" : clf.cv_results_['mean_test_accuracy'][clf.best_index_],\n \"precision\" : clf.cv_results_['mean_test_precision'][clf.best_index_],\n \"recall\" : clf.cv_results_['mean_test_recall'][clf.best_index_]\n }\n 
dump(resultado, self.carpeta_modelo() + \"/resultado.bin\")\n dump(clf.best_params_, self.carpeta_modelo() + \"/params.bin\")\n dump(clf.cv_results_, self.carpeta_modelo() + \"/cv_results.bin\")\n dump(clf.best_estimator_, self.carpeta_modelo() + \"/modelo.plk\")\n \n def limpieza_archivos_finales_fasta_ruta(self, llave):\n shutil.rmtree(self.carpeta_fold_clase(llave))\n\n def limpieza_archivos_finales_fasta(self, num_clase):\n llave = self.obtener_llaves_clases()[num_clase]\n if num_clase == 0:\n shutil.rmtree(self.carpeta_fold_clase(llave) + \"/train\")\n else:\n self.limpieza_archivos_finales_fasta_ruta(llave)\n \n def limpiar_archivos_intermedios(self):\n self.limpieza_archivos_finales_fasta(0)\n Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(delayed(wrapper_limpieza_archivos_finales_fasta)(self, num_clase) for num_clase in self.iterador_clases())\n self.features_pre_generados = False\n \n def mostrar_resultados(self):\n if (not self.modelo_final_generado):\n print(\"Debe generar el modelo final\")\n return\n display(load(self.carpeta_modelo() + \"/cv_results.bin\"))\n display(load(self.carpeta_modelo() + \"/params.bin\"))\n display(load(self.carpeta_modelo() + \"/resultado.bin\"))\n if (self.modelo_referencial_generado):\n self.mostrar_resultados_referencial_vs_final()\n \n def devolver_resultado(self):\n return load(self.carpeta_modelo() + \"/resultado.bin\")\n \n def devolver_mejor_parametro(self):\n return load(self.carpeta_modelo() + \"/params.bin\")\n \n def devolver_cv_results(self):\n return load(self.carpeta_modelo() + \"/cv_results.bin\")\n \n def devolver_mejor_modelo(self):\n return load(self.carpeta_modelo() + \"/modelo.plk\")\n \n def preparar_data_modelo_referencial(self, num_clase):\n carpeta_base_referencial = self.carpeta_base + \"/modelos_referenciales/clase_\" + str(num_clase)\n if not os.path.isdir(carpeta_base_referencial):\n os.mkdir(carpeta_base_referencial)\n if not os.path.isdir(carpeta_base_referencial + \"/data\"):\n os.mkdir(carpeta_base_referencial + \"/data\")\n clase_positiva = util_fasta.leer_fasta_list(self.archivo_clase(num_clase, \"lncRNA\"))\n PCT = util_fasta.leer_fasta(self.archivo_clase(num_clase, \"PCT\"))\n CDS = util_fasta.leer_fasta(self.archivo_clase(num_clase, \"CDS\"))\n clase_negativa = list()\n for k in PCT.keys():\n clase_negativa.append((k, PCT[k], CDS[k]))\n X = clase_positiva + clase_negativa\n y = ([1] * len(clase_positiva)) + ([0] * len(clase_negativa))\n skf = StratifiedKFold(n_splits=10)\n isplit = 1\n for _, test in skf.split(X, y):\n split_lncRNA = list()\n split_PCT = list()\n split_CDS = list()\n for itest in test:\n if y[itest] == 1:\n split_lncRNA.append(X[itest])\n else:\n split_PCT.append((X[itest][0], X[itest][1]))\n split_CDS.append((X[itest][0], X[itest][2]))\n if not os.path.isdir(carpeta_base_referencial + \"/data/clase_\" + str(isplit)):\n os.mkdir(carpeta_base_referencial + \"/data/clase_\" + str(isplit))\n util_fasta.generar_fasta(split_lncRNA, carpeta_base_referencial + \"/data/clase_\" + str(isplit) + \"/lncRNA.fa\")\n util_fasta.generar_fasta(split_PCT, carpeta_base_referencial + \"/data/clase_\" + str(isplit) + \"/PCT.fa\")\n util_fasta.generar_fasta(split_CDS, carpeta_base_referencial + \"/data/clase_\" + str(isplit) + \"/CDS.fa\")\n isplit += 1\n \n def instanciar_modelo_referencial(self, num_clase):\n carpeta_base_referencial = self.carpeta_base + \"/modelos_referenciales/clase_\" + str(num_clase)\n return Tesis2(carpeta_base=carpeta_base_referencial, n_jobs=self.n_jobs, verbose=0, 
tuned_parameters=self.tuned_parameters, score=self.score)\n \n def crear_modelo_referencial(self, num_clase):\n carpeta_base_referencial = self.carpeta_base + \"/modelos_referenciales/clase_\" + str(num_clase)\n self.preparar_data_modelo_referencial(num_clase)\n tesis2 = self.instanciar_modelo_referencial(num_clase)\n tesis2.generar_modelo_final()\n shutil.rmtree(carpeta_base_referencial + \"/data\")\n \n def entrenar_modelos_referenciales(self):\n if not os.path.isdir(self.carpeta_base + \"/modelos_referenciales\"):\n os.mkdir(self.carpeta_base + \"/modelos_referenciales\")\n Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(delayed(wrapper_crear_modelo_referencial)(self, num_clase) for num_clase in self.iterador_clases())\n \n def obtener_resultados_referencial_vs_final(self):\n resultado_referencial = list()\n resultado_final = list()\n if (not self.modelo_final_generado) or (not self.modelo_referencial_generado):\n print(\"Debe generar ambos modelos para obtener resultados comparativos\")\n return resultado_referencial, resultado_final \n parametros = self.devolver_mejor_parametro()\n resultados = self.devolver_cv_results()\n i_seleccionado = 0\n i_iter = 0\n for param in resultados[\"params\"]:\n if parametros == param:\n i_seleccionado = i_iter\n i_iter += 1\n\n for i in range(self.obtener_num_clases()):\n resultado_referencial.append(self.instanciar_modelo_referencial(i+1).devolver_resultado()[\"accuracy\"])\n resultado_final.append(resultados[\"split\" + str(i) + \"_test_accuracy\"][i_seleccionado])\n return resultado_referencial, resultado_final\n \n def mostrar_resultados_referencial_vs_final(self):\n if (not self.modelo_final_generado) or (not self.modelo_referencial_generado):\n print(\"Debe generar ambos modelos para obtener resultados comparativos\")\n return\n resultado_referencial, resultado_final = self.obtener_resultados_referencial_vs_final()\n for i in range(self.obtener_num_clases()):\n print(\"***************\")\n print(\"*** CLASE \" + str(i+1) + \" ***\")\n print(\"***************\")\n\n acc_mr = resultado_referencial[i]\n acc_mf = resultado_final[i]\n\n print(\"Accuracy modelo referencial: \" + '{:.1%}'.format(acc_mr))\n print(\"Accuracy modelo final: \" + '{:.1%}'.format(acc_mf))\n print(\"\")\n\n print(\"********************\")\n print(\"*** MODELO FINAL ***\")\n print(\"********************\") \n print(\"Accuracy modelo final: \" + '{:.1%}'.format(self.devolver_resultado()[\"accuracy\"]))\n print(\"\")\n \n def generar_predictor_final(self):\n predictor = self.devolver_mejor_modelo()\n llave_fold_final = self.obtener_llaves_clases()[0]\n nuevo_generador_features = GeneradorFeaturesParaPredicciones(carpeta_base=self.carpeta_base, diamond_db=self.diamond_db, carpeta_cpat=self.carpeta_fold_cpat(llave_fold_final))\n predictor.steps.pop(0)\n predictor.steps.insert(0,['features', nuevo_generador_features])\n dump(predictor, self.carpeta_modelo() + \"/modelo_final.plk\")\n \n def reportar_predicciones(self, archivo_lncRNA, archivo_PCT):\n y_pred_lncRNA = self.realizar_predicciones(archivo_lncRNA)\n probs_lncRNA = self.realizar_predicciones_proba(archivo_lncRNA, features_calculados=True)\n y_pred_PCT = self.realizar_predicciones(archivo_PCT)\n probs_PCT = self.realizar_predicciones_proba(archivo_PCT, features_calculados=True)\n \n y_true = ([1] * len(y_pred_lncRNA)) + ([0] * len(y_pred_PCT))\n y_pred = np.concatenate((y_pred_lncRNA, y_pred_PCT))\n \n plt.figure(1)\n probs = np.concatenate((probs_lncRNA, probs_PCT))\n precision, recall, _ = 
precision_recall_curve(y_true, probs)\n        average_precision = average_precision_score(y_true, probs)\n        plt.step(recall, precision, color='b', alpha=0.2, where='post')\n        plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n        plt.xlabel('Recall')\n        plt.ylabel('Precision')\n        plt.ylim([0.0, 1.05])\n        plt.xlim([0.0, 1.0])\n        plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(average_precision))\n\n        plt.figure(2)\n        fpr, tpr, _ = roc_curve(y_true, probs)\n        roc_auc = roc_auc_score(y_true, probs)\n        plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc)\n        plt.plot([0, 1], [0, 1], linestyle='--') # random predictions curve\n        plt.xlim([0.0, 1.0])\n        plt.ylim([0.0, 1.0])\n        plt.xlabel('False Positive Rate or (1 - Specificity)')\n        plt.ylabel('True Positive Rate or (Sensitivity)')\n        plt.title('Receiver Operating Characteristic')\n        plt.legend(loc=\"lower right\")\n\n        return classification_report(y_true, y_pred, target_names=[\"PCT\", \"lncRNA\"]), confusion_matrix(y_true, y_pred), precision_recall_fscore_support(y_true, y_pred, average='binary'), y_true, y_pred, probs, recall, precision, average_precision, fpr, tpr, roc_auc\n\n    def reportar_predicciones_keras(self, archivo_lncRNA, archivo_PCT):\n        y_pred_lncRNA = self.realizar_predicciones(archivo_lncRNA)\n        probs_lncRNA = self.realizar_predicciones_proba_keras(archivo_lncRNA, features_calculados=True)\n        y_pred_PCT = self.realizar_predicciones(archivo_PCT)\n        probs_PCT = self.realizar_predicciones_proba_keras(archivo_PCT, features_calculados=True)\n\n        y_true = ([1] * len(y_pred_lncRNA)) + ([0] * len(y_pred_PCT))\n        y_pred = np.concatenate((y_pred_lncRNA, y_pred_PCT))\n\n        plt.figure(1)\n        probs = np.concatenate((probs_lncRNA[:,1], probs_PCT[:,1]))\n        precision, recall, _ = precision_recall_curve(y_true, probs)\n        average_precision = average_precision_score(y_true, probs)\n        plt.step(recall, precision, color='b', alpha=0.2, where='post')\n        plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n        plt.xlabel('Recall')\n        plt.ylabel('Precision')\n        plt.ylim([0.0, 1.05])\n        plt.xlim([0.0, 1.0])\n        plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(average_precision))\n\n        plt.figure(2)\n        fpr, tpr, _ = roc_curve(y_true, probs)\n        roc_auc = roc_auc_score(y_true, probs)\n        plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc)\n        plt.plot([0, 1], [0, 1], linestyle='--') # random predictions curve\n        plt.xlim([0.0, 1.0])\n        plt.ylim([0.0, 1.0])\n        plt.xlabel('False Positive Rate or (1 - Specificity)')\n        plt.ylabel('True Positive Rate or (Sensitivity)')\n        plt.title('Receiver Operating Characteristic')\n        plt.legend(loc=\"lower right\")\n\n        return classification_report(y_true, y_pred, target_names=[\"PCT\", \"lncRNA\"]), confusion_matrix(y_true, y_pred), precision_recall_fscore_support(y_true, y_pred, average='binary'), y_true, y_pred, probs, recall, precision, average_precision, fpr, tpr, roc_auc\n\n    def realizar_predicciones(self, archivo_fasta, features_calculados=False):\n        predictor = load(self.carpeta_modelo() + \"/modelo_final.plk\")\n        X_test = util_fasta.leer_fasta_list(archivo_fasta)\n        predictor.set_params(features__features_calculados=features_calculados)\n        return predictor.predict(X_test)\n\n    def realizar_predicciones_proba(self, archivo_fasta, features_calculados=False):\n        predictor = load(self.carpeta_modelo() + \"/modelo_final.plk\")\n        X_test = util_fasta.leer_fasta_list(archivo_fasta)\n        predictor.set_params(features__features_calculados=features_calculados)\n        return 
predictor.decision_function(X_test)\n\n    def realizar_predicciones_proba_keras(self, archivo_fasta, features_calculados=False):\n        predictor = load(self.carpeta_modelo() + \"/modelo_final.plk\")\n        X_test = util_fasta.leer_fasta_list(archivo_fasta)\n        predictor.set_params(features__features_calculados=features_calculados)\n        return predictor.predict_proba(X_test)\n\n# wrappers for parallel execution\ndef wrapper_armar_fold_final(tesis2, tipo):\n    tesis2.armar_fold_final(tipo)\n\ndef wrapper_armar_fold_clase(tesis2, num_clase):\n    tesis2.armar_fold_clase(num_clase)\n\ndef wrapper_generar_cpat_fold(tesis2, num_clase):\n    tesis2.generar_cpat_fold(num_clase)\n\ndef wrapper_limpieza_archivos_CDS(tesis2, num_clase):\n    tesis2.limpieza_archivos_CDS(num_clase)\n\ndef wrapper_ejecutar_cpat_fold(tesis2, num_clase):\n    tesis2.ejecutar_cpat_fold(num_clase)\n\ndef wrapper_generar_features_fold(tesis2, num_clase):\n    tesis2.generar_features_fold(num_clase)\n\ndef wrapper_limpieza_archivos_finales_fasta(tesis2, num_clase):\n    tesis2.limpieza_archivos_finales_fasta(num_clase)\n\ndef wrapper_crear_modelo_referencial(tesis2, num_clase):\n    tesis2.crear_modelo_referencial(num_clase)\n\ndef crear_modelo_keras(optimizer='adam', learn_rate=0.01, momentum=0, init_mode='uniform', activation='relu', activation2='softmax', activation_final='sigmoid', dropout_rate=0.0, weight_constraint=1, neurons=10, hidden_layers=1, hidden_neurons=10):\n    #optimizer = SGD(lr=learn_rate, momentum=momentum)\n    model = Sequential()\n    model.add(Dense(units=neurons, activation=activation, input_dim=10, kernel_initializer=init_mode, kernel_constraint=maxnorm(weight_constraint)))\n    for _ in range(hidden_layers):\n        model.add(Dropout(dropout_rate))\n        model.add(Dense(units=hidden_neurons, activation=activation2, kernel_initializer=init_mode, kernel_constraint=maxnorm(weight_constraint)))\n    model.add(Dropout(dropout_rate))\n    if activation_final == \"softmax\":\n        model.add(Dense(2, activation=activation_final, kernel_initializer=init_mode))\n        model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n    else:\n        model.add(Dense(1, activation=activation_final, kernel_initializer=init_mode)) # a single output unit is needed for binary_crossentropy\n        model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n    return model\n\nclass GeneradorFeatures(BaseEstimator, TransformerMixin):\n    def __init__(self, tesis2=None, cantidad_transcritos=None, num_clases=None):\n        if cantidad_transcritos is None:\n            return\n        self.cantidad_transcritos = cantidad_transcritos\n        self.num_clases = num_clases\n        self.tesis2 = tesis2\n\n    def fit(self, X, y=None):\n        self._llave_fold = self.obtener_llave_fold(X)\n        return self\n\n    def transform(self, X):\n        return self.obtener_features_pre_calculados(X)\n\n    def obtener_llave_fold(self, X):\n        cod_secuencias = \"\"\n        num_transcritos = len(X)\n        num_transcritos_por_grupo = self.cantidad_transcritos\n        for i in range(num_transcritos//(num_transcritos_por_grupo*2)):\n            cod_secuencias += X[i * num_transcritos_por_grupo][0]\n        llave = hashlib.sha224(cod_secuencias.encode()).hexdigest()\n        return llave\n\n    def obtener_features_pre_calculados(self, X):\n        llave = self._llave_fold\n        tipo = \"train\"\n        if os.path.isfile(self.tesis2.archivo_fold_clase(llave, \"test\", \"lncRNA\")):\n            secuencias = util_fasta.leer_fasta(self.tesis2.archivo_fold_clase(llave, \"test\", \"lncRNA\"), 1)\n            if list(secuencias.keys())[0] == X[0][0]:\n                tipo = \"test\"\n        features = list(load(self.tesis2.archivo_features_clase(llave, tipo, \"lncRNA\")).values())\n        
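# feature rows keep the lncRNA-then-PCT order that obtener_data_entrenamiento() assumes when building labels\n        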
features += list(load(self.tesis2.archivo_features_clase(llave, tipo, \"PCT\")).values())\n return [list(x.values()) for x in features]\n \nclass GeneradorFeaturesKeras(BaseEstimator, TransformerMixin):\n def __init__(self, tesis2=None, cantidad_transcritos=None, num_clases=None):\n if cantidad_transcritos is None:\n return\n self.cantidad_transcritos = cantidad_transcritos\n self.num_clases = num_clases\n self.tesis2 = tesis2\n\n def fit(self, X, y=None):\n self._llave_fold = self.obtener_llave_fold(X)\n return self\n\n def transform(self, X):\n return self.obtener_features_pre_calculados(X)\n\n def obtener_llave_fold(self, X):\n cod_secuencias = \"\"\n num_transcritos = len(X)\n num_transcritos_por_grupo = self.cantidad_transcritos\n for i in range(num_transcritos//(num_transcritos_por_grupo*2)):\n cod_secuencias += X[i][0]\n llave = hashlib.sha224(cod_secuencias.encode()).hexdigest()\n return llave\n\n def obtener_features_pre_calculados(self, X):\n llave = self._llave_fold\n tipo = \"train\"\n if os.path.isfile(self.tesis2.archivo_fold_clase(llave, \"test\", \"lncRNA\")):\n secuencias = util_fasta.leer_fasta(self.tesis2.archivo_fold_clase(llave, \"test\", \"lncRNA\"), 1)\n if list(secuencias.keys())[0] == X[0][0]:\n tipo = \"test\"\n features = {**load(self.tesis2.archivo_features_clase(llave, tipo, \"lncRNA\")), **load(self.tesis2.archivo_features_clase(llave, tipo, \"PCT\"))}\n return [list(features[x[0]].values()) for x in X]\n\nclass GeneradorFeaturesParaPredicciones(BaseEstimator, TransformerMixin):\n def __init__(self, carpeta_base=None, diamond_db=None, carpeta_cpat=None, features_calculados=False):\n if carpeta_base is None:\n return\n self.carpeta_base = carpeta_base\n self.diamond_db = diamond_db\n self.carpeta_cpat = carpeta_cpat\n self.features_calculados = features_calculados\n\n def fit(self, X, y=None):\n raise Exception('Este modelo no admite fit')\n return self\n\n def transform(self, X):\n carpeta_transform = self.carpeta_base + \"/transform\"\n if not os.path.isdir(carpeta_transform):\n os.mkdir(carpeta_transform)\n archivo_fasta = carpeta_transform + \"/secuencias.fa\"\n if (not self.features_calculados):\n self.generar_archivos_fasta(archivo_fasta, X)\n self.ejecutar_diamond_cpat(archivo_fasta)\n self.generar_features(archivo_fasta)\n return self.obtener_features_pre_calculados(archivo_fasta)\n \n def generar_archivos_fasta(self, archivo_fasta, X):\n util_fasta.generar_fasta(X, archivo_fasta)\n \n def ejecutar_diamond_cpat(self, archivo_fasta):\n diamond_db = self.diamond_db\n carpeta_cpat = self.carpeta_cpat\n util_caracteristicas.ejecutar_diamond(archivo_fasta, diamond_db, archivo_fasta.replace(\".fa\", \".dmnd\"))\n util_caracteristicas.ejecutar_cpat(archivo_fasta, carpeta_cpat, archivo_fasta.replace(\".fa\", \".cpat\"))\n os.remove(archivo_fasta.replace(\".fa\", \".cpat\") + \".dat\")\n os.remove(archivo_fasta.replace(\".fa\", \".cpat\") + \".r\")\n \n def generar_features(self, archivo_fasta):\n util_caracteristicas.generar_features_base(archivo_fasta, archivo_fasta.replace(\".fa\", \".cpat\"), archivo_fasta.replace(\".fa\", \".dmnd\"), archivo_fasta.replace(\".fa\", \".ft\"))\n\n def obtener_features_pre_calculados(self, archivo_fasta):\n features = list(load(archivo_fasta.replace(\".fa\", \".ft\")).values())\n return [list(x.values()) for x in features]\n","sub_path":"Semana 
10/libs/util_modelo_final.py","file_name":"util_modelo_final.py","file_ext":"py","file_size_in_byte":42566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"108137872","text":"import pandas as pd\nimport os\nimport numpy as np\n\ndata = pd.read_csv(os.path.expanduser('~/Downloads/object-detection-crowdai/labels_crowdai.csv'))\n\nW, H = 1920, 1200\n#xs, ys = 416./W, 416./H\n# W, H = 416, 416\n\ndw = 1. / W\ndh = 1. / H\n\nlabels = data.Label.unique()\n\n# data['x1'] = np.min(data[['xmin', 'xmax']], axis=1) * xs\n# data['x2'] = np.max(data[['xmin', 'xmax']], axis=1) * xs\n# data['y1'] = np.min(data[['ymin', 'ymax']], axis=1) * ys\n# data['y2'] = np.max(data[['ymin', 'ymax']], axis=1) * ys\n\ndata['x1'] = data['xmin']\ndata['x2'] = data['xmax']\ndata['y1'] = data['ymin']\ndata['y2'] = data['ymax']\n\ndata['w'] = dw * (data['x2'] - data['x1'])\ndata['h'] = dh * (data['y2'] - data['y1'])\ndata['x'] = dw * ((data['x1'] + data['x2']) / 2.)\ndata['y'] = dh * ((data['y1'] + data['y2']) / 2.)\n\ndata.to_csv(os.path.expanduser('~/Downloads/object-detection-crowdai/labels2.csv'), index=False)\n\n","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"299635395","text":"import random\r\nimport time\r\nMAX = 100\r\nMIN = 0\r\nc = random.randint(MIN, MAX)\r\ntotal = 0\r\nstarttime = time.clock()\r\n\r\nprint(\"*~猜數字遊戲~*\")\r\nprint(c)\r\nuser_input = input(\"請輸��範圍在 0~100之間的正整數:\")\r\n\r\n\r\ndef judgment(input_number):\r\n global MAX\r\n global MIN\r\n global starttime\r\n global total\r\n try:\r\n input_number = int(input_number)\r\n if MAX < input_number or input_number < MIN:\r\n total += 1\r\n return input(\"你輸入的數字和 87 有 100% 像, 範圍在\" + str(MIN) + \"到\" + str(MAX) + \"之間好嗎:\")\r\n if MIN <= input_number <= MAX:\r\n if input_number > c:\r\n total += 1\r\n MAX = input_number\r\n MIN = MIN\r\n return input(\"猜太大了, 範圍在\" + str(MIN) + \"到\" + str(MAX) + \"之間:\")\r\n elif input_number < c:\r\n total += 1\r\n MAX = MAX\r\n MIN = input_number\r\n return input(\"猜太小了, 範圍在\" + str(MIN) + \"到\" + str(MAX) + \"之間:\")\r\n if input_number == c:\r\n total += 1\r\n endtime = time.clock()\r\n print(\"恭喜你猜中了,總共猜了\" + str(total) + \"次才猜對\")\r\n print(\"花費了人生\" + str(int(endtime - starttime)) + \"秒在這個遊戲上.\")\r\n return c\r\n except:\r\n total += 1\r\n return input(\"別再做錯誤測試了!!\" + \"範圍在\" + str(MIN) + \"到\" + str(MAX) + \"之間:\")\r\n\r\nwhile user_input != c:\r\n user_input = judgment(user_input)\r\n\r\n\r\n","sub_path":"猜數字.py","file_name":"猜數字.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"163148497","text":"\"\"\"Top Heroes test case\"\"\"\nimport unittest\n\nfrom mock import patch\nfrom mockfirestore import MockFirestore\n\nfrom main import app\nfrom models.hero import Hero\n\n\nclass TopHeroesHandlerTestCase(unittest.TestCase):\n \"\"\"Top Heroes handler\"\"\"\n\n def setUp(self):\n \"\"\"SetUp é chamado no inicio de cada teste\"\"\"\n self.mock_db = MockFirestore()\n self.patcher = patch(\n 'modules.main.MainModule.get_firestore_db',\n return_value=self.mock_db)\n self.patcher.start()\n # Nessa linha vamos iniciar a API nos testes\n self.app = app.test_client()\n\n def tearDown(self):\n \"\"\"O tearDown é chamado no final de cada teste\"\"\"\n self.patcher.stop()\n self.mock_db.reset()\n\n @staticmethod\n def create_hero(hero_name, universe):\n \"\"\"Create a hero for tests\"\"\"\n hero = Hero()\n hero.name = hero_name\n hero.description = '{0} description'.format(hero_name)\n hero.universe = universe\n hero.save()\n return hero\n\n def test_get_top_heroes(self):\n \"\"\"Test get top heroes\"\"\"\n # Aqui vamos fazer um loop e criar 20 herois\n # E o nome vai ser hero + index do loop, ex: \"Hero 1\"\n for index in range(1, 21):\n self.create_hero('Hero {0}'.format(index), 'marvel')\n\n # Fazendo a primeira consulta a url e conferindo a resposta\n response = self.app.get(path='/top-heroes')\n first_hero_list = response.get_json()['heroes']\n self.assertEqual(len(first_hero_list), 5)\n self.assertEqual(response.status_code, 200)\n\n # Fazendo a segunda consulta a url e conferindo a resposta\n response = self.app.get(path='/top-heroes')\n self.assertEqual(response.status_code, 200)\n second_hero_list = response.get_json()['heroes']\n self.assertEqual(len(second_hero_list), 5)\n\n # Comparando as duas listas para ver se são diferentes\n # Pois essa url precisa sempre retornar herois diferentes\n self.assertNotEqual(first_hero_list, second_hero_list)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"api-heroes-blank/tests/test_top_heroes.py","file_name":"test_top_heroes.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"95014112","text":"#! /usr/bin/python3\n\nimport argparse\nimport os\nimport sys\n\nfrom utils import set_mac, set_random_mac, reset_mac\n\n\nparser = argparse.ArgumentParser(\n usage=\"sudo {} [-h] (-s [INTERFACE] [MAC-ADDR] | -r [INTERFACE] | -R [INTERFACE])\".format(sys.argv[0]),\n description=\"Script to change your MAC address\")\n\nif os.getuid() != 0:\n print(\"Run as root\")\n parser.print_usage()\n sys.exit(0)\n\ngroup = parser.add_mutually_exclusive_group(required=True)\ngroup.add_argument(\"-s\", \"--set\", nargs=2, metavar=(\"[INTERFACE]\", \"[MAC-ADDR]\"), help=\"Sets the given mac address\")\ngroup.add_argument(\"-r\", \"--randomise\", metavar=\"[INTERFACE]\", help=\"Sets a random MAC address\")\ngroup.add_argument(\"-R\", \"--reset\", metavar=\"[INTERFACE]\", help=\"Reset the MAC address\")\nargs = parser.parse_args()\n\nif args.set:\n set_mac(args.set[0], args.set[1])\n\nelif args.randomise:\n set_random_mac(args.randomise)\n\nelif args.reset:\n reset_mac(args.reset)\n","sub_path":"Change-mac/change-mac.py","file_name":"change-mac.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"621036237","text":"from flask import Flask, Response\nimport cv2\nimport numpy as np\n\nclass Camera(object):\n def __init__(self):\n self.cap = cv2.VideoCapture(0)\n # Reset camera capture size for faster processing\n self.cap.set(3,480)\n self.cap.set(4,360)\n\n def get_frame(self):\n ret, frame = self.cap.read()\n # Apply laplacian edge detection to image\n laplacian = cv2.Laplacian(frame,cv2.CV_64F)\n # Write out original and edge detected images at once\n cv2.imwrite('blah.jpg',np.hstack((frame,laplacian)))\n return open('blah.jpg', 'rb').read()\n\napp = Flask(__name__)\n\ndef gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n@app.route('/')\ndef video_feed():\n return Response(gen(Camera()),mimetype='multipart/x-mixed-replace;boundary=frame')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","sub_path":"Cam-Tests/livestream.py","file_name":"livestream.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"588679344","text":"import pygal\n\nfrom 骰子.die import Die\n\n# 创建一个 D6 和 D10\ndie_1 = Die()\ndie_2 = Die(10)\n\nresults = []\nfor roll_num in range(5000):\n result = die_1.roll() + die_2.roll()\n results.append(result)\n\nfrequencies = []\nmax_results = die_1.num_sides + die_2.num_sides\nfor value in range(2, max_results+1):\n # 计算某个值出现同样的次数\n frequency = results.count(value)\n frequencies.append(frequency)\n\n# 对结果进行可视化\nhist = pygal.Bar()\n\nhist.title = \"5000 次:D6 + D10 的结果。\"\nhist.x_labels = [str(num) for num in range(2, 17)]\nhist.x_title = \"结果\"\nhist.y_title = \"重复出现的次数\"\n\nhist.add('D6 + D10', frequencies)\nhist.render_to_file('images/different_visual.svg')","sub_path":"Python/Python 编程 从入门到实践/my_data_view/骰子/different_dice.py","file_name":"different_dice.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"229678008","text":"# CREATE THIS with a Graph Class: takes in a length of vertices\n# [[1, 2, 3], [0], [0, 3], [0, 2]]\nfrom random import randrange\nfrom collections import defaultdict\n\nclass Graph:\n def __init__(self, V):\n self._V = V # number of vertices\n self._E = 0\n # create initial structure for vertices and edges to be placed in. \n self.graph_display = [[] for i in range(V)]\n self.adjacency_list = defaultdict(list)\n\n def V(self):\n return self._V\n \n def E(self):\n return self._E\n \n def _validate_vertex_input(self, v):\n if v < 0 or v >= self._V:\n raise Exception(\"Dude, no bueno\")\n return True\n \n def add_edges_to_initial_graph(self, v, u):\n if self._validate_vertex_input(v):\n # [0,1]->[[1],[0]];\n # [0,2]->[[1,2],[0],[0]];\n # [0,3]->[[1,2,3],[0],[0],[0]];\n # [2,3]->[[1,2,3],[0],[0,3],[0,2]];\n self.graph_display[v].append(u)\n self.graph_display[u].append(v)\n self._E += 1\n \n # def transform_to_adjacency_list(self):\n # if not self._E:\n # raise Exception(\"Empty Graph array\")\n \n # for source in self.graph_display:\n # self.adjacency_list[source[0]].append()\n \n\nif __name__ == \"__main__\":\n random_graph_vertex = 4\n g = Graph(random_graph_vertex)\n\n # for i in range(random_graph_vertex):\n g.add_edges_to_initial_graph(3, 2)\n g.add_edges_to_initial_graph(0, 3)\n g.add_edges_to_initial_graph(0, 2)\n g.add_edges_to_initial_graph(0, 1)\n g.add_edges_to_initial_graph(1, 3)\n\n # g.transform_to_adjacency_list()\n\n print(g.adjacency_list)\n\n \n\n\n","sub_path":"algoexpert/graphs/graph_basics/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"364242983","text":"filename = \"/home/antonio/Progetto Grafica/lines/tappoforato.lines\"\nlines = lines2lines(filename)\nrobiewall = STRUCT(AA(POLYLINE)(lines)) \nV,FV,EV,polygons = larFromLines(lines)\nVV = AA(LIST)(range(len(V)))\nsubmodel = STRUCT(MKPOLS((V,EV)))\n#VIEW(larModelNumbering(1,1,1)(V,[VV,EV,FV],submodel,0.05))\n\nW = (((mat(V) - V[5]) * 438.31) + [.5,.5]).tolist()\n\n#y 8 e 10? 78.89580000000002\n#y 9 e 11 ? 51.15077700000001\n\n\nW[8] = [W[8][0],78.89580000000002]\nW[10] = [138.37446699999998,78.89580000000002]\n\nW[9] = [W[9][0],51.15077700000001]\nW[11] = [138.37446699999998,51.15077700000001]\n\nVV = AA(LIST)(range(len(V)))\nsubmodel = STRUCT(MKPOLS((V,EV)))\n#VIEW(larModelNumbering(1,1,1)(V,[VV,EV,FV],submodel,0.05))\n\ntsottM = ([W[1]]+[W[0]]+[W[2]]+[W[3]],[[0,1,2,3]])\ntspessM = (W,[[8,2,9,3],[9,3,11,0],[11,0,10,1],[1,10,2,8]])\n\ntsottP = [-5,-alzo0,-sfspac+bordino,bordino]\ntspessP = [-5,-alzo0,sfspac]\n\ntsott = larModelProduct([tsottM,larQuote1D(tsottP)])\ntspess = larModelProduct([tspessM,larQuote1D(tspessP)])\n\n#VIEW(STRUCT(MKPOLS(tspess)+MKPOLS(tsott)))\n\nW[0] = SUM([W[0],[-larghezzaB,-larghezzaB]])\nW[1] = SUM([W[1],[-larghezzaB,larghezzaB]])\nW[2] = SUM([W[2],[larghezzaB,larghezzaB]])\nW[3] = SUM([W[3],[larghezzaB,-larghezzaB]])\n\ntfstileP = [-5,-alzo0,bordino,-bordino,2*bordino]\ntfstile = larModelProduct([tspessM,larQuote1D(tfstileP)])","sub_path":"266319/models/tappoforato.py","file_name":"tappoforato.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"266561567","text":"import logging\nimport os\nimport subprocess\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nUSER_APP_DIR = os.path.join(BASE_DIR, \"userapps\")\nSETTINGS_DIR = os.path.join(BASE_DIR, \"config\", \"settings\")\nLOG_DIR = os.path.join(BASE_DIR, \"log\")\n\nSETTING_TEMPLATE = \"\"\"\n# -*- coding: utf-8 -*-\nfrom .base import *\n\"\"\"\n\nlogging.basicConfig(\n level=logging.DEBUG\n)\n\n\ndef make_new_app(app_name):\n new_app_directory = os.path.join(USER_APP_DIR, app_name)\n if os.path.exists(new_app_directory):\n overwrite = input(\"{} has exist, is overwrite? yes/no:\").strip()\n if overwrite == \"yes\":\n logging.warn(\"remove exists dir:%s\", new_app_directory)\n os.removedirs(new_app_directory)\n \n else:\n logging.warn(\"has exists app dir, will exit\")\n return False\n \n cmd = \"python {manage} startapp {app_name} {directory}\".format(\n manage=os.path.join(BASE_DIR, \"manage.py\"),\n app_name=app_name,\n directory=new_app_directory\n )\n os.makedirs(new_app_directory)\n logging.info(\"new app cmd:%s\", cmd)\n p = subprocess.Popen(cmd, shell=True)\n p.wait()\n return True\n\n\ndef make_app_setting(app_name):\n app_setting = os.path.join(SETTINGS_DIR, app_name + \".py\")\n \n with open(app_setting, \"w\") as f:\n f.write(SETTING_TEMPLATE)\n\n\ndef main():\n app_name = input(\"input your app name:\").strip()\n if make_new_app(app_name):\n make_app_setting(app_name)\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n logging.exception(e)\n input(\"press any key to continue!\")\n","sub_path":"make_app.py","file_name":"make_app.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"173813247","text":"#=========================================================================\n# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public\n# License (GPL) version 3, as described at www.opensource.org.\n# Copyright (C)2016 William H. Majoros (martiandna@gmail.com).\n#=========================================================================\nfrom __future__ import (absolute_import, division, print_function, \n unicode_literals, generators, nested_scopes, with_statement)\nfrom builtins import (bytes, dict, int, list, object, range, str, ascii,\n chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)\n# The above imports should allow this program to run in both Python 2 and\n# Python 3. You might need to update your version of module \"future\".\n\n######################################################################\n# Attributes:\n# transcripts : list of Transcript objects\n# ID\n# transcriptHash : transcripts hashed by their ID\n# Methods:\n# gene=Gene()\n# gene.addTranscript(t)\n# n=gene.getNumTranscripts()\n# n=gene.numTranscripts()\n# t=gene.getIthTranscript(i)\n# t=gene.longestTranscript()\n# id=gene.getId()\n# id=gene.getID()\n# gene.setId(id)\n# begin=gene.getBegin() # leftmost edge\n# end=gene.getEnd() # rightmost edge\n# strand=gene.getStrand()\n# substrate=gene.getSubstrate()\n# gff=gene.toGff()\n# exons=gene.getMergedExons()\n# \n######################################################################\n\nclass Gene:\n def __init__(self):\n self.transcripts=[]\n self.transcriptHash={}\n\n def getMergedExons(self):\n transcripts=self.transcripts\n exons=[]\n for transcript in transcripts:\n raw=transcript.getRawExons()\n exons.extend(raw)\n #print(\"RAW:\",len(raw))\n #for i in range(len(raw)):\n #print(\"\\t\",raw[i].begin,raw[i].end)\n #print()\n exons.sort(key=lambda x: x.begin)\n n=len(exons)\n i=0\n while(iend): end=e\n return end\n\n def addTranscript(self,transcript):\n id=transcript.getTranscriptId()\n hash=self.transcriptHash\n if(hash.get(id,None) is not None): return\n self.transcripts.append(transcript)\n hash[id]=transcript\n\n def getNumTranscripts(self):\n return len(self.transcripts)\n\n def numTranscripts(self):\n return len(self.transcripts)\n\n def getIthTranscript(self,i):\n return self.transcripts[i]\n\n def longestTranscript(self):\n transcripts=self.transcripts\n if(len(transcripts)==0): return None\n longest=transcripts[0]\n longestLength=longest.getExtent()\n for transcript in transcripts[1:]:\n length=transcript.getExtent()\n if(length>longestLength):\n longest=transcript\n longestLength=length\n return longest\n\n def getId(self):\n return self.ID\n\n def getID(self):\n return self.ID\n\n def setId(self,id):\n self.ID=id\n\n def getBeginAndEnd(self):\n transcripts=self.transcripts\n begin=None\n end=None\n for transcript in transcripts:\n b=transcript.getBegin()\n e=transcript.getEnd()\n if(begin is None): begin=b; end=e\n else:\n if(bend): end=e\n return (begin,end)\n\n def toGff(self):\n transcripts=self.transcripts\n gff=\"\"\n for transcript in transcripts:\n gff+=transcript.toGff()\n return gff\n\n def __hash__(self):\n return hash(self.ID)\n\n def __eq__(self,other):\n return self.ID==other.ID\n\n \n","sub_path":"Gene.py","file_name":"Gene.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"537527988","text":"# encoding:utf8\n\nimport urllib2\nimport jsonpickle\nimport os\n\nfrom BeautifulSoup import BeautifulSoup\n\nos.environ['http_proxy'] = 'http://127.0.0.1:3128'\nos.environ['https_proxy'] = 'http://127.0.0.1:3128'\nnewhlist = []\n\n\ndef req(url):\n req = urllib2.Request(url)\n json = urllib2.urlopen(req).read()\n return json\n\ndistrict_names = u\"鼓楼 建邺 秦淮 玄武 雨花台 栖霞 江宁 浦口\".split(\" \")\n\n\ndef handlereq(a):\n if a.has_key('data'):\n if a['data'].has_key('list'):\n # projs = reduce(lambda a, b: a + \",\" + b, [d['project_name'] for d in a['data']['list']])\n for data in a['data']['list']:\n if data['district_name'] not in district_names:\n continue\n if data['house_type'] != u\"住宅\":\n continue\n projname = data['project_name']\n url = \"https://nj.fang.lianjia.com/loupan/p_%s\" % projname\n html = urllib2.urlopen(url).read()\n parsed_html = BeautifulSoup(html)\n text = parsed_html.body.find('div', attrs={'id': 'house-details'}).text.encode('utf8')\n company = text.split(\"物业公司:\")[1].split(\"最新开盘\")[0].decode(\"utf8\")\n jfsj = text.split(\"交房时间:\")[1].split(\"容积率\")[0]\n try:\n totalscore = parsed_html.body.find('span', attrs={'class': 'score'}).text.encode('utf8').split(\"分\")[0]\n except:\n totalscore = 3.11\n ndata = {\"resblock_name\": data['resblock_name'], \"avg_price_start\": data['avg_price_start'], \"address_remark\": data['address_remark'],\n \"min_frame_area\": data['min_frame_area'], \"max_frame_area\": data['max_frame_area'], \"show_price\": data[\"show_price\"],\n \"latitude\": data[\"latitude\"], \"longitude\": data[\"longitude\"], \"totalscore\": \"totalscore\", \"resblock_frame_area\": data['resblock_frame_area'],\n \"open_date\": data['open_date'], \"jfsj\": jfsj, \"bizcircle_name\": data['bizcircle_name'],\n 'process_status': data['process_status'], 'district_name': data['district_name'], 'company': company, 'jfsj': jfsj,\n 'totalscore': totalscore,'url':url}\n newhlist.append(ndata)\n\nurl = r'https://nj.fang.lianjia.com/loupan/bp200ep350nht1nht6nhs1/?_t=1'\na = jsonpickle.loads(req(url))\ntotal = a['data']['total']\npage = int(int(total) / 10) + 1\nhandlereq(a) # handle page 1\nfor i in range(2, page + 1):\n url = r'https://nj.fang.lianjia.com/loupan/bp200ep350nht1nht6nhs1pg%s/?_t=1' % i\n a = jsonpickle.loads(req(url))\n handlereq(a)\n\nimport xlwt\n\nstyle = xlwt.easyxf('font: name Consolas, bold on')\nwb = xlwt.Workbook(encoding=\"utf-8\")\nwb.__dict__['_Workbook__sst'].encoding = 'utf-8'\nws = wb.add_sheet(u\"新房\")\nrcnt = 1\ncnt = 0\nfor k, v in dict(newhlist[0]).iteritems():\n ws.write(0, cnt, k, style)\n cnt += 1\nfor data in newhlist:\n cnt = 0\n for k, v in dict(data).iteritems():\n ws.col(cnt).width = 15 * 256\n v = v if type(v) == unicode else jsonpickle.encode(v)\n ws.write(rcnt, cnt, v, style)\n cnt += 1\n rcnt += 1\nwb.save(os.path.join(\"./\", u\"新房.xls\"))\n","sub_path":"craw.py","file_name":"craw.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"192760427","text":"__author__ = 'skamau'\nimport json\nimport urllib2\nimport datetime\n\ndef todate(datestring):\n return datetime.datetime.fromtimestamp(int(datestring))\n\nfeeds = dict()\nfeeds['Lowell'] = 'http://developer.mbta.com/lib/RTCR/RailLine_10.json'\nfeeds['Newburyport/Rockport'] = 'http://developer.mbta.com/lib/RTCR/RailLine_12.json'\nfeeds['Fitchburg/South Acton'] = 'http://developer.mbta.com/lib/RTCR/RailLine_9.json'\n\ndef trip_printout(options):\n printlines = ''\n for (line, url) in feeds.items():\n printlines = printlines + \"\\n--- %s line\\n\" % line\n info = urllib2.urlopen(url)\n data = json.load(info)\n\n results = data['Messages']\n\n trip_printed = []\n\n for train in sorted(results, key=lambda r:r['Scheduled']):\n trip = train['Trip']\n if not trip in trip_printed:\n trip_printed.append(trip)\n destination = train['Destination']\n # printlines = printlines + 'options: ' + options\n if options != 'all':\n # Not interested in trains bound for North Station since that's where I'm starting\n if destination.lower() == \"north station\":\n continue\n\n # The scheduled time for North Station is the one we want\n if train[\"Stop\"].lower() != \"north station\":\n if options != 'all':\n continue\n trainNumber = train['Vehicle']\n scheduled = todate(train['Scheduled'])\n lateness = train[\"Lateness\"]\n\n # if scheduled < datetime.datetime.now() - datetime.timedelta(minutes=10):\n # continue\n \n if trainNumber == '':\n trainNumber = 'unknown'\n printlines = printlines + \"%s Trip %s to %s, train '%s\" % (scheduled.strftime('%I:%M'), trip, destination, trainNumber)\n if lateness != '':\n printlines = printlines + \"lateness '%s'\" % (lateness) \n printlines = printlines + \"\\n\" \n # only print out 2 trips for each line\n #if len(trip_printed) >= 2:\n # break\n return printlines\n\nif __name__ == '__main__':\n #print trip_printout()\n #exit()\n\n import androidhelper\n droid = androidhelper.Android()\n response = None\n\n while True:\n if response is not None: \n opt = response.result\n else:\n opt = ''\n response = droid.dialogGetInput(\"Next Trips\", trip_printout(opt))\n if response.result is None:\n break","sub_path":"lines_dialog3.py","file_name":"lines_dialog3.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"416815841","text":"\"\"\"Utility functions for sending emails.\"\"\"\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\n\nimport mailer\n\nfrom .utils import html_to_plain_text\n\n\ndef send_email(request, extra_context, subject_template, body_template,\n from_email, recipients, priority=\"medium\"):\n \"\"\"\n Sends an email based on templates for subject and body.\n\n :param request: The current request instance.\n :param extra_context: A dictionary of items that should be added to the\n templates' contexts.\n :param subject_template: A string representing the path to the template of\n of the email's subject.\n :param body_template: A string representing the path to the template of\n the email's body.\n :param from_email: String that represents the sender of the email.\n :param recipients: A list of tuples of recipients. The tuples are similar\n to the ADMINS setting.\n\n \"\"\"\n if request:\n context = RequestContext(request, extra_context)\n else:\n context = extra_context\n subject = render_to_string(subject_template, context)\n subject = ''.join(subject.splitlines())\n message_html = render_to_string(body_template, context)\n message_plaintext = html_to_plain_text(message_html)\n mailer.send_html_mail(subject, message_plaintext, message_html, from_email,\n recipients, priority=priority)\n","sub_path":"django_libs/utils_email.py","file_name":"utils_email.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"219830500","text":"import os\nscriptpath = os.path.dirname(os.path.realpath(__file__))\npath = os.path.join(scriptpath,\"processedCSV/\")\npath = path.replace(\"\\\\\",\"/\")\ntextfile_path = os.path.join(scriptpath,\"processed_csv.txt\")\npp = open(textfile_path, \"w\")\n\nfor r,d,f in os.walk(path):\n for file in f:\n if \".csv\" in file:\n pp.write(path+file+\"\\n\") \n \n","sub_path":"csv_to_bvh/write_processedjointsFiles.py","file_name":"write_processedjointsFiles.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"496927310","text":"from d2l import AllDeepLearning as d2l\nfrom mxnet import nd\nfrom AI.AILearning.NaturalLanguageProcessing import NaturalLanguageStatistics as loader\n\nbatch_size, num_step = 32, 25\ntrain_iter, vocab = loader.load_data_time_machine(batch_size, num_step)\n\n\ndef get_params(vocab_size, num_hiddens, ctx):\n num_inputs = num_outputs = vocab_size\n normal = lambda shape: nd.random.normal(scale=0.01, shape=shape, ctx=ctx)\n three = lambda: (normal((num_inputs, num_hiddens)),\n normal((num_hiddens, num_hiddens)),\n nd.zeros(num_hiddens, ctx=ctx))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = nd.zeros(num_outputs, ctx=ctx)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\n\n\ndef init_gru_state(batch_size, num_hiddens, ctx):\n return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx),)\n\n\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)\n R = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)\n H_tilda = nd.tanh(nd.dot(X, W_xh) + nd.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = nd.dot(H, W_hq) + b_q\n outputs.append(Y)\n return nd.concat(*outputs, dim=0), (H,) # not use nd.concatenate and unpack list when pass argument\n\n\nvocab_size, num_hiddens, ctx = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, ctx, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, ctx)\nd2l.plt.show()\n\n","sub_path":"AILearning/ModernRecurrentNeuronNetwork/GateRecurrentUnit.py","file_name":"GateRecurrentUnit.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"159332876","text":"from sklearn import datasets, metrics,preprocessing\nimport tensorflow as tf\n\nboston = datasets.load_boston()\n\nx_data = preprocessing.StandardScaler().fit_transform(boston.data)\ny_data = boston.target\n\nx = tf.placeholder(tf.float64,shape=(None,13))\ny_true = tf.placeholder(tf.float64,shape=(None))\n\nwith tf.name_scope('inference') as scope:\n w = tf.Variable(tf.zeros([1,13],dtype=tf.float64,name='weights'))\n b = tf.Variable(0,dtype=tf.float64,name='bias')\n y_pred = tf.matmul(w,tf.transpose(x)) + b\n\nwith tf.name_scope('loss') as scope:\n loss = tf.reduce_mean(tf.square(y_true-y_pred))\n\nwith tf.name_scope('train') as scope:\n learning_rate = 0.1\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n train = optimizer.minimize(loss)\n\n# Before startng, initialize the variables, we will run this first\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n for step in range(200):\n sess.run(train,{x:x_data,y_true: y_data})\n\n MSE = sess.run(loss, {x: x_data, y_true:y_data}) # mean squared error - average of the squared differences between a real target value and our predicted value\nprint(MSE)\n\n\n# Same thing using contrib.learn's estimator\nNUM_STEPS = 200\nMINIBATCH_SIZE=506\n\nfeature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x_data)\n\nreg = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns, optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.1))\n\nreg.fit(x_data, boston.target,steps=NUM_STEPS,batch_size=MINIBATCH_SIZE)\n\nMSE = reg.evaluate(x_data, boston.target,steps=NUM_STEPS,batch_size=MINIBATCH_SIZE)\nprint(\"Using contrib.learns's estimator\")\nprint(MSE)","sub_path":"TensorFlow/LearningTF/linearregressiowithloss.py","file_name":"linearregressiowithloss.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"547328060","text":"\"\"\"\n\"\"\"\nimport pprint\n\ntry:\n from stage_check import Output\nexcept ImportError:\n import Output\n\ntry:\n from stage_check import OutputT1Detail\nexcept ImportError:\n import OutputT1Detail\n\n\ndef create_instance():\n return OutputT1DetailText()\n\n\nclass OutputT1DetailText(OutputT1Detail.Base, Output.Text):\n \"\"\"\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def amend_test_result(\n self,\n all_entries, \n matching_entries,\n params\n ):\n \"\"\"\n \"\"\"\n if len(matching_entries) > 0:\n exception_count = 0\n for entry in matching_entries:\n if \"result_text\" in entry:\n self.message_list.extend(entry[\"result_text\"])\n exception_count += len(entry[\"result_text\"]) - 1\n self.status = Output.Status.FAIL\n self.message = f\"{exception_count} exceptions detected for {len(matching_entries)}/{len(all_entries)} interfaces(s)\"\n\n else:\n self.message = f\"All {len(all_entries)} interfaces(s) within parameters\"\n self.status = Output.Status.OK\n return self.status\n\n\n","sub_path":"stage_check/stage_check/OutputT1DetailText.py","file_name":"OutputT1DetailText.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"621047221","text":"import random;\nimport os;\nimport collections;\nfrom data import functions;\nfrom data import gui_content\nfrom data import container;\nfrom tkinter import *;\nfrom tkinter import font;\nimport math;\nfrom functools import partial\nimport types\n\ndef register(data, override=False):\n\tplacejson = get_places();\n\tplace = {};\n\t\n\t# --#\t\t\t\t\t\t\t\t\t\t#\n\tplace[\"options\"]=data[\"options\"];\t\t\t#\n\t\t# Optionen:\n\t\t# \t- events:\n\t\t#\t\t- enterf: Event, das beim ersten Betreten ausgeführt wird.\n\t\t#\t\t- entern: Event, das bei jedem weiteren Betreten ausgeführt wird.\n\t\t#\t\t- random: Array von Events, die beim Aufenthalt in diesem Ort zufällig auftreten können.\n\t\t#\t\t- clickable: Array von Events, die beim Aufenthalt in diesem Ort per Knopfdruck ausgelöst werden können.\n\t\t#\t- garage: \n\t\t#\t\t- storage: Array von Fahrzeugen, die sich von Anfang an in der Garage dieses Ortes befinden sollen.\n\t\t#\t\t- size:\tAnzahl der Stellplätze in dieser Garage.\n\t\t#\t- storage:\n\t\t#\t\t- storage: Array von Items, die sich von Anfang an in dem Lagerplatz dieses Ortes befinden sollen.\n\t\t#\t\t- size: Anzahl der Items, die dieser Lagerplatz fassen können soll.\n\t# --#\t\t\t\t\t\t\t\t\t\t#\n\tcheck = True;\t\t\t\t\t\t\t\t#\n\ttry:\t\t\t\t\t\t\t\t\t\t#\n\t\texec(\"print(\"+str(data[\"garage\"])+\")\");\t\t#\n\texcept:\t\t\t\t\t\t\t\t\t\t#\n\t\tcheck = False;\t\t\t\t\t\t\t#\n\tif not check:\t\t\t\t\t\t\t\t\t#\n\t\tplace[\"garage\"] = {\"storage\":{},\"size\":1,\"disable\":False};\t#\n\telse:\t\t\t\t\t\t\t\t\t\t\t#\n\t\tplace[\"garage\"] = data[\"garage\"];\t\t#\n\t# --#\t\t\t\t\t\t\t\t\t\t#\n\tcheck = True;\t\t\t\t\t\t\t\t#\n\ttry:\t\t\t\t\t\t\t\t\t\t#\n\t\texec(\"print(\"+data[\"word\"]+\")\");\t\t#\n\texcept:\t\t\t\t\t\t\t\t\t\t#\n\t\tcheck = False;\t\t\t\t\t\t\t#\n\tif not check:\t\t\t\t\t\t\t\t#\n\t\tplace[\"word\"] = \"in\";\t\t\t\t\t#\n\telse:\t\t\t\t\t\t\t\t\t\t#\n\t\tplace[\"word\"] = data[\"word\"];\t\t#\n\t# --#\t\t\t\t\t\t\t\t\t\t#\n\tcheck = True;\t\t\t\t\t\t\t\t#\n\ttry:\t\t\t\t\t\t\t\t\t\t#\n\t\texec(\"print(\"+str(data[\"storage\"])+\")\");\t\t#\n\texcept:\t\t\t\t\t\t\t\t\t\t#\n\t\tcheck = False;\t\t\t\t\t\t\t#\n\tif not check:\t\t\t\t\t\t\t\t\t#\t\n\t\tplace[\"storage\"] = {\"storage\":{},\"size\":8,\"disable\":False};#\n\telse:\t\t\t\t\t\t\t\t\t\t\t#\n\t\tplace[\"storage\"] = data[\"storage\"];\t\t#\n\t# --#\t\t\t\t\t\t\t\t\t\t#\n\tcheck = True;\t\t\t\t\t\t\t\t#\n\ttry:\t\t\t\t\t\t\t\t\t\t#\n\t\texec(\"print(\"+str(data[\"description\"])+\")\");\t\t#\n\texcept:\t\t\t\t\t\t\t\t\t\t#\n\t\tcheck = False;\t\t\t\t\t\t\t#\n\tif not check:\t\t\t\t\t\t\t\t#\t\n\t\tplace[\"description\"] = \"\"\t\t\t\t#\n\telse:\t\t\t\t\t\t\t\t\t\t#\n\t\tplace[\"description\"] = data[\"description\"];\t#\n\t# --#\t\t\t\t\t\t\t\t\t\t#\n\t\n\tcon = container.register({\"inv\":place[\"storage\"][\"storage\"],\"name\":\"Lager von \"+data[\"name\"],\"size\":place[\"storage\"][\"size\"]});\n\tplace[\"system\"] = {\"first\":True,\"container\":con};\n\tcheck=False;\n\ttry:\n\t\texec(\"print(\\\"\"+str(placejson[data[\"name\"]])+\"\\\")\");\n\texcept:\n\t\tcheck=True;\n\tif check or override:\n\t\tfrom data.getgui import gui;\n\t\tgui.hook.onPlaceRegister.fire(place);\n\t\tplacejson[data[\"name\"]] = place;\n\tsave_places(placejson);\ndef enterplace(gui, name, takeveh=False, endjourney=False, force=False):\n\tgui.hook.onPlaceEnter.fire(name);\n\tgd = functions.get_gamedata();\n\tplace = 
get_place(name);\n\tcheck=True;\n\ttry:\n\t\texec(\"print(\\\"\"+str(gd[\"travel\"])+\"\\\")\");\n\texcept KeyError:\n\t\tcheck=False;\n\tstop=False;\n\tif check:\n\t\tif gd[\"travel\"][\"destination\"]==name and not endjourney and not force:\n\t\t\tstop=True;\n\tif not stop:\n\t\tgd[\"place\"] = name;\n\t\tgd[\"place_hub_timer\"]=0;\n\t\tif endjourney:\n\t\t\tveh = gd[\"travel\"][\"vehicle\"];\n\t\t\tdel gd[\"travel\"];\n\t\telif takeveh:\n\t\t\tveh = gd[\"travel\"][\"vehicle\"];\n\t\tgarageworks=True;\n\t\ttry:\n\t\t\tif veh[\"name\"]==\"selfdestruct\":\n\t\t\t\ttakeveh=False;\n\t\texcept UnboundLocalError:\n\t\t\tprint(\"\");\n\t\tif takeveh:\n\t\t\tgarageworks=False;\n\t\t\tif len(gd[\"places\"][name][\"garage\"][\"storage\"]) >= gd[\"places\"][name][\"garage\"][\"size\"]:\n\t\t\t\tfunctions.save_gamedata(gd);\n\t\t\t\tgarageerror(gui, veh);\n\t\t\telse:\n\t\t\t\tgd[\"places\"][name][\"garage\"][\"storage\"].append(veh);\n\t\t\t\tgarageworks=True;\n\t\tif garageworks:\n\t\t\tif place[\"system\"][\"first\"] and place[\"options\"][\"events\"][\"enterf\"][0] != \"None\":\n\t\t\t\tgd[\"places\"][name][\"system\"][\"first\"] = False;\n\t\t\t\tfunctions.save_gamedata(gd);\n\t\t\t\tgui.game(place[\"options\"][\"events\"][\"enterf\"][1], place[\"options\"][\"events\"][\"enterf\"][0], [\"place-enterf\", name]);\n\t\t\telse:\n\t\t\t\tfunctions.save_gamedata(gd);\n\t\t\t\tif place[\"options\"][\"events\"][\"entern\"][0] != \"None\":\n\t\t\t\t\tgui.game(place[\"options\"][\"events\"][\"entern\"][1], place[\"options\"][\"events\"][\"entern\"][0], [\"place-entern\", name]);\n\t\t\t\telse:\n\t\t\t\t\thub(gui, False, True);\n\telse:\n\t\tgui.new_text();\ndef hub(gui, travel=False, returned=False):\n\tgui.alreadyruns = True;\n\tgui.clear_screen();\n\tback = gui.hintergrund();\n\tback.pack();\n\tgd = functions.get_gamedata();\n\tif travel:\n\t\tprint(gd[\"travel\"][\"steps\"]);\n\t\tif gd[\"travel\"][\"steps\"] <= 0:\n\t\t\tprint(\"Destination Reached.\");\n\t\t\tenterplace(gui, gd[\"travel\"][\"destination\"], True, True);\n\t\telse:\n\t\t\tgd[\"travel\"][\"steps\"]-=1;\n\t\t\tfunctions.save_gamedata(gd);\n\t\t\tprint(\"Travel\");\n\t\t\tevent(gui, gd[\"travel\"][\"vehicle\"][\"events\"]);\n\telse:\n\t\ttry:\n\t\t\tplace = get_place(gd[\"place\"]);\n\t\texcept KeyError:\n\t\t\tgui.game(\"hub\",\"system\");\n\t\ttry:\n\t\t\texec(\"print(\\\"\"+str(gd[\"place_hub_timer\"])+\"\\\")\");\n\t\texcept:\n\t\t#\tprint(sys.exc_info());\n\t\t\tgd[\"place_hub_timer\"] = 0;\n\t\tprint(gd[\"place_hub_timer\"]);\n\t\tif not returned:\n\t\t\tif not gd[\"place_hub_timer\"] == 0:\n\t\t\t\tif not gd[\"place_hub_timer\"] >= (place[\"options\"][\"hubtimer\"]+1):\n\t\t\t\t\tgd[\"place_hub_timer\"]+=1;\n\t\t\t\t\tfunctions.save_gamedata(gd);\n\t\t\t\t\tprint(\"Random\");\n\t\t\t\t\tevent(gui, place[\"options\"][\"events\"][\"random\"]);\n\t\t\t\telse:\n\t\t\t\t\tprint(str(place[\"options\"][\"hubtimer\"]+1)+\"/\"+str(gd[\"place_hub_timer\"]));\n\t\t\t\t\tgd[\"place_hub_timer\"]=0;\n\t\t\t\t\tfunctions.save_gamedata(gd);\n\t\t\tgd[\"place_hub_timer\"]+=1;\n\t\tfunctions.save_gamedata(gd);\n\t\tText = Canvas(back, bg=\"Gold2\", highlightthickness=0);\n\t\tText.place(x=functions.pro_size(10,0), y=functions.pro_size(10,1));\n\t\tLabel(Text, text=\"Willkommen \"+place[\"word\"]+\" \"+gd[\"place\"], font=gui_content.ch_fontsize(32), bg=\"Gold2\").grid(row=1,columnspan=3,sticky=W);\n\t\tButton(Text, text=\"Garage\", command=partial(garage, gui), font=gui_content.ch_fontsize(\"16\"), 
width=functions.pro_size(1,0)).grid(row=2,padx=5,pady=5);\n\t\tif not place[\"storage\"][\"disable\"]:\n\t\t\tButton(Text, text=\"Lager\", command=partial(storage, gui), font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0)).grid(row=2,column=1,padx=5,pady=5);\n\t\telse:\n\t\t\tButton(Text, text=\"Lager\", state=DISABLED, command=partial(storage, gui), font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0)).grid(row=2,column=1,padx=5,pady=5);\n\t\tif not gd[\"place\"] in get_gps():\n\t\t\tButton(Text, text=\"Ort Speichern\", command=partial(savetogps, gui, place), font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0)).grid(row=2,column=2,padx=5,pady=5);\n\t\telse:\t\t\t\n\t\t\tButton(Text, text=\"Ort Gespeichert.\", command=partial(print, \"\"), font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0), bg=\"green\").grid(row=2,column=2,padx=5,pady=5);\n\t\tif len(place[\"options\"][\"events\"][\"clickable\"]) > 0:\n\t\t\tLabel(Text, text=\"Aktionen:\", font=gui_content.ch_fontsize(20), bg=\"Gold2\").grid(row=3,sticky=W,columnspan=3);\n\t\t\tkey = 0;\n\t\t\tfor value in place[\"options\"][\"events\"][\"clickable\"]:\n\t\t\t\trow = math.floor(key / 6) + 4;\n\t\t\t\tcolumn = (key % 6);\n\t\t\t\tif key < 61:\n\t\t\t\t\tButton(Text, text=value[2], command=partial(clickable, gui, value), font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0), height=functions.pro_size(.05,1)).grid(row=row,column=column,padx=5,pady=5);\n\t\t\t\telif key == 61:\n\t\t\t\t\tLabel(Text, text=\"Maximum von 60 Aktionen überschritten.\", font=gui_content.ch_fontsize(20), bg=\"Gold2\", fg=\"red\").grid(row=14,sticky=W,columnspan=6);\t\n\t\t\t\tkey+=1;\t\n\t\tLabel(Text, text=place[\"description\"], font=gui_content.ch_fontsize(20), bg=\"Gold2\").grid(row=15,sticky=W,columnspan=3);\n\t\tButton(Text, text=gd[\"place\"]+\" verlassen\", command=partial(leave, gui), bg=\"red\", font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0)).grid(row=16,padx=5,pady=5);\t\n\t\tif len(place[\"options\"][\"events\"][\"random\"]) > 0:\n\t\t\tButton(Text, text=\"Weitergehen\", command=partial(event, gui, place[\"options\"][\"events\"][\"random\"]), bg=\"green\", font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0)).grid(row=16,column=1,padx=5,pady=5);\t\n\t\telse:\n\t\t\tButton(Text, text=\"Weitergehen\", state=DISABLED, command=partial(event, gui), bg=\"green\", font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0)).grid(row=16,column=1,padx=5,pady=5);\n\t#\tButton(Text, text=\"HUB\", command=partial(mainhub, gui), font=gui_content.ch_fontsize(\"16\"), width=functions.pro_size(1,0)).grid(row=2,column=2,padx=5,pady=5);\t\t\ndef savetogps(gui, place, remkey=-1):\n\tgd = functions.get_gamedata();\n\ttry:\n\t\tgpsmax = gd[\"gpsmax\"];\n\texcept:\n\t\tgpsmax = 3;\n\t\tgd[\"gpsmax\"] = 3;\n\tgps = get_gps();\n\tif len(get_gps()) < gpsmax:\n\t\tgps.append(gd[\"place\"]);\n\t\tgd[\"gps\"]=gps;\n\t\tfunctions.save_gamedata(gd);\n\t\tprint(\"success\");\n\telse:\n\t\tif not remkey == -1:\n\t\t\tprint(\"replaced\");\n\t\t\tgps[remkey]=gd[\"place\"];\n\t\t\tgd[\"gps\"]=gps;\n\t\t\tfunctions.save_gamedata(gd);\n\t\telse:\n\t\t\tprint(\"full\");\n\t\t\tenterveh(gui, place, True, -2);\n\thub(gui);\ndef garage(gui):\n\t\tgui.clear_screen();\n\t\thintergrund = gui.hintergrund();\n\t\thintergrund.pack();\n\t\t\n\t\tLabel(hintergrund, text=\"Garage von \"+functions.get_gamedata()[\"place\"], font=gui_content.ch_fontsize(\"16\"), bg=\"green\"). 
place(y=functions.pro_size(1,1), x=functions.pro_size(50,0), anchor=N);\n\t\tButton(hintergrund, text=\"Zurück\", command=partial(hub, gui, False, True), font=gui_content.ch_fontsize(\"16\"), bg=\"green\"). place(y=functions.pro_size(5,1), x=functions.pro_size(50,0), anchor=N);\n\t\t\n\t\tinventar1 = Canvas(hintergrund, width=functions.pro_size(90,0), height=functions.pro_size(80,1));\n\t\tinventar1.place(anchor=N, x=functions.pro_size(50,0), y=functions.pro_size(10,1));\n\t\tinventar = functions.VerticalScrolledFrame(inventar1);\n\t\tinventar.place(width=functions.pro_size(90,0), height=functions.pro_size(80,1));\n\t\t\n\t\tinventar_content = get_place(functions.get_gamedata()[\"place\"])[\"garage\"][\"storage\"];\n\t\tif len(inventar_content) == 0:\n\t\t\tLabel(inventar.interior, text=\"Leer\", font=gui_content.ch_fontsize(\"32\")).place(x=functions.pro_size(50,0), y=functions.pro_size(50,1), anchor=CENTER);\n\t\telse:\n\t\t\n\t\t\txrow = 0;\n\t\t\tfor value in inventar_content:\n\t\t\t\txrow +=1\n\t\t\t\tnewcanvas = {};\n\t\t\t\tnewcanvas[xrow] = Canvas(inventar.interior, bg=\"green\", width=functions.pro_size(90,0), height=functions.pro_size(9,1));\n\t\t\t\tnewcanvas[xrow].grid(row=xrow);\n\t\t\t\tLabel(newcanvas[xrow], text=value[\"name\"], font=gui_content.ch_fontsize(\"40\"), bg=\"green\", fg=\"white\").place(x=functions.pro_size(1,0), y=functions.pro_size(4.5,1), anchor=W);\n\t\t\t\tButton(newcanvas[xrow], text=\"Benutzen\", command=partial(enterveh, gui, value, True, (xrow-1)), fg=\"white\",bg=\"green\").place(y=functions.pro_size(9,1), x=functions.pro_size(88,0), anchor=SE);\ndef storage(gui):\n\tcontainer.openxx(get_place(functions.get_gamedata()[\"place\"])[\"system\"][\"container\"], gui); \ndef enterveh(gui, vehicle, self=True, key=-1, returnfunc=False):\n\t\tif isinstance(self, bool):\n\t\t\tgui.clear_screen();\n\t\t\thintergrund = gui.hintergrund();\n\t\t\thintergrund.pack();\n\t\t\tishub=False;\n\t\telse:\n\t\t\tprint(\"Baum\");\n\t\t\tself.clear_screen1();\n\t\t\thintergrund = self.hintergrund;\n\t\t\tishub=True;\n\t\tgui.placeback = self;\n\t\tif not returnfunc:\n\t\t\tfunc=False;\n\t\telse:\n\t\t\tfunc=True;\n\t\tif key == -2:\n\t\t\tLabel(hintergrund, text=\"Dein GPS ist voll. Bitte wähle einen Eintrag, den du ersetzen möchtest.\", font=gui_content.ch_fontsize(\"24\"), bg=\"red\"). place(y=functions.pro_size(1,1), x=functions.pro_size(50,0), anchor=N);\n\t\telse:\t\n\t\t\tLabel(hintergrund, text=\"GPS-Einträge\", font=gui_content.ch_fontsize(\"16\"), bg=\"green\"). place(y=functions.pro_size(1,1), x=functions.pro_size(50,0), anchor=N);\n\t\tif not func:\n\t\t\tif ishub:\n\t\t\t\tButton(hintergrund, text=\"Abbrechen\", command=self.hub_menu, font=gui_content.ch_fontsize(\"16\"), bg=\"green\"). place(y=functions.pro_size(6,1), x=functions.pro_size(50,0), anchor=N);\n\t\t\telse:\t\t\t\n\t\t\t\tButton(hintergrund, text=\"Abbrechen\", command=partial(hub, gui, False, True), font=gui_content.ch_fontsize(\"16\"), bg=\"green\"). place(y=functions.pro_size(6,1), x=functions.pro_size(50,0), anchor=N);\n\t\telse:\n\t\t\tButton(hintergrund, text=\"Abbrechen\", command=returnfunc, font=gui_content.ch_fontsize(\"16\"), bg=\"green\"). 
place(y=functions.pro_size(6,1), x=functions.pro_size(50,0), anchor=N);\t\t\t\n\t\tinventar1 = Canvas(hintergrund, width=functions.pro_size(90,0), height=functions.pro_size(80,1));\n\t\tinventar1.place(anchor=N, x=functions.pro_size(50,0), y=functions.pro_size(10,1));\n\t\tinventar = functions.VerticalScrolledFrame(inventar1);\n\t\tinventar.place(width=functions.pro_size(90,0), height=functions.pro_size(80,1));\n\t\t\n\t\tinventar_content = get_gps();\n\t\tif len(inventar_content) == 0:\n\t\t\tLabel(inventar.interior, text=\"Leer\", fg=\"black\", font=gui_content.ch_fontsize(\"32\")).place(x=functions.pro_size(50,0), y=functions.pro_size(50,1), anchor=CENTER);\n\t\telse:\n\t\t\n\t\t\txrow = 0;\n\t\t\tfor value in inventar_content:\n\t\t\t\txrow +=1\n\t\t\t\tnewcanvas = {};\n\t\t\t\tnewcanvas[xrow] = Canvas(inventar.interior, bg=\"green\", width=functions.pro_size(90,0), height=functions.pro_size(9,1));\n\t\t\t\tnewcanvas[xrow].grid(row=xrow);\n\t\t\t\tLabel(newcanvas[xrow], text=value, font=gui_content.ch_fontsize(\"40\"), bg=\"green\", fg=\"white\").place(x=functions.pro_size(1,0), y=functions.pro_size(4.5,1), anchor=W);\n\t\t\t\tif not key == -2:\n\t\t\t\t\tButton(newcanvas[xrow], text=\"Reisen [\"+str(vehicle[\"steps\"])+\"]\", command=partial(travel, gui, value, vehicle, key), fg=\"white\",bg=\"green\").place(y=functions.pro_size(9,1), x=functions.pro_size(88,0), anchor=SE);\n\t\t\t\telse:\n\t\t\t\t\tButton(newcanvas[xrow], text=\"Ersetzen\", command=partial(savetogps, gui, vehicle, (xrow-1)), fg=\"white\",bg=\"red\").place(y=functions.pro_size(9,1), x=functions.pro_size(88,0), anchor=SE);\t\t\t\t\t\ndef event(gui, events):\n\tgd = functions.get_gamedata();\n\tevent = random.SystemRandom().choice(events);\n\tkeyword=\"random\";\n\ttry:\n\t\texec(\"print(\\\"\"+gd[\"place\"]+\"\\\")\");\n\texcept:\n\t\tgd[\"place\"]=\"\";\n\t\tkeyword=\"travel\";\n\tgui.game(event[1], event[0], [\"place-\"+keyword, gd[\"place\"]]);\ndef leave(gui):\n\tgd = functions.get_gamedata();\n\tgui.hook.onPlaceLeave.fire(gd[\"place\"]);\n\ttry:\n\t\tname = gd[\"place\"];\n\t\tplace = get_place(name);\n\t\tdel gd[\"place\"];\n\t\tfunctions.save_gamedata(gd);\n\t\tif place[\"options\"][\"events\"][\"exit\"][0] != \"None\":\n\t\t\tgui.game(place[\"options\"][\"events\"][\"exit\"][1], place[\"options\"][\"events\"][\"exit\"][0], [\"place-exit\", name]);\n\t\telse:\n\t\t\tgui.new_text();\n\texcept:\n\t\tgui.new_text();\ndef garageerror(gui, veh):\n\tgui.clear_screen();\n\tback = gui.hintergrund();\n\tback.pack();\n\tgd = functions.get_gamedata();\n\t\n\tLabel(back, text=\"Die Garage von \"+gd[\"place\"]+\" ist voll.\", bg=\"Gold2\", fg=\"red\", font=gui_content.ch_fontsize(\"32\")).grid(row=1, column=1, columnspan=2, sticky=NSEW)\n\tButton(back, text=\"Fahrzeug \"+veh[\"name\"]+\" zerstören.\", command=partial(hub, gui), font=gui_content.ch_fontsize(\"16\"), bg=\"green\").grid(row=2, column=1, sticky=NSEW)\n\tButton(back, text=\"Zu einem anderen Ort fahren.\", command=partial(enterveh, gui, veh), font=gui_content.ch_fontsize(\"16\"), bg=\"green\").grid(row=2, column=2, sticky=NSEW)\n\n\tback.grid_rowconfigure(0, weight=1)\n\tback.grid_rowconfigure(3, weight=1)\n\tback.grid_columnconfigure(0, weight=1)\n\tback.grid_columnconfigure(3, weight=1)\ndef travel(gui, place, vehicle, key=-1):\n\tgd = functions.get_gamedata();\n\tvehicle[\"key\"]=key;\n\tgd[\"travel\"] = {\"steps\":vehicle[\"steps\"],\"destination\":place,\"vehicle\":vehicle};\n\tif not key == -1:\n\t\tdel gd[\"places\"][gd[\"place\"]][\"garage\"][\"storage\"][key];\n\tfunctions.save_gamedata(gd);\n\tleave(gui);\ndef clickable(gui, event):\n\tgd = functions.get_gamedata();\n\tgui.game(event[1], event[0], [\"clickable\", gd[\"place\"]]);\ndef get_places():\n\ttry:\n\t\treturn functions.get_gamedata()[\"places\"];\n\texcept:\n\t\treturn {};\ndef get_gps():\n\ttry:\n\t\treturn functions.get_gamedata()[\"gps\"];\n\texcept:\n\t\treturn [];\ndef get_place(name):\n\treturn get_places()[name];\ndef save_places(array):\n\tfunctions.add_json_string(\"user\\gamedata.json\", \"places\", array);\ndef mainhub(gui):\n\tgui.hubstorage.__init__(gui)\n\tgui.hubstorage.hub_menu(gui.hubstorage)","sub_path":"data/place_functions.py","file_name":"place_functions.py","file_ext":"py","file_size_in_byte":15793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"631774234","text":"from flask import Blueprint\nfrom .graphiqlview import GraphiQLView\nfrom .graphqlview import GraphQLView\n\n\nclass GraphQL(object):\n def __init__(self, app, schema, **options):\n self.app = app\n self.blueprint = Blueprint('graphql', __name__,\n template_folder='templates',\n static_url_path='/static/graphql',\n static_folder='static/graphql/')\n\n default_query = options.pop('default_query', None)\n app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, **options))\n app.add_url_rule('/graphiql', view_func=GraphiQLView.as_view('graphiql', default_query=default_query))\n\n self.app.register_blueprint(self.blueprint)\n","sub_path":"flask_graphql/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"273464533","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/cameron/Dev/kanban-dev/django-autotask/djautotask/migrations/0048_auto_20200211_1037.py\n# Compiled at: 2020-02-28 16:41:52\n# Size of source mod 2**32: 853 bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('djautotask', '0047_auto_20200207_1126')]\n operations = [\n migrations.AlterField(model_name='timeentry',\n name='hours_to_bill',\n field=models.DecimalField(blank=True, decimal_places=4, max_digits=9, null=True)),\n migrations.AlterField(model_name='timeentry',\n name='hours_worked',\n field=models.DecimalField(blank=True, decimal_places=4, max_digits=9, null=True)),\n migrations.AlterField(model_name='timeentry',\n name='offset_hours',\n field=models.DecimalField(blank=True, decimal_places=4, max_digits=9, null=True))]","sub_path":"pycfiles/django-autotask-0.0.68a0.tar/0048_auto_20200211_1037.cpython-36.py","file_name":"0048_auto_20200211_1037.cpython-36.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"616546651","text":"#-*- coding:utf-8 -*-\nimport setup\nimport datetime\nimport requests\nfrom logging_manager import LoggingManager\n\nrequests_logging = LoggingManager(to_file=\"logs/requests.log\")\n\ndef textify_response(resp, info_line=\"\"):\n return \"\"\"\n<-<\n{}\n{}\n\nHeaders:\n{}\n>->\n\n<-<\n{}\n>->\n-=-=-=-=\n\"\"\".format(\n info_line,\n resp.status_code,\n '\\n'.join(['{}:{}'.format(h,v) for h,v in resp.headers.items()]),\n resp.text\n )\n\ndef textify_request(req, info_line=\"\"):\n return \"\"\"\n<-<\n{}\n{} {}\n\nHeaders:\n{}\n\n>->\n<-<\n{}\n>->\n-=-=-=-=\n\"\"\".format(\n info_line,\n req.method,\n req.url,\n '\\n'.join(['{}:{}'.format(h,v) for h,v in req.headers.items()]),\n req.data\n )\n\ndef POST(endpoint, data, headers, save_to=None):\n req = requests.Request('POST', endpoint, data=data, headers=headers)\n txt_request = textify_request(req)\n requests_logging.log_msg(\"REQUEST TO {}\".format(endpoint), txt_request)\n\n r = requests.post(endpoint, data=data, headers=headers)\n txt_response = textify_response(r)\n requests_logging.log_msg(\"RESPONSE FROM {}\".format(endpoint), txt_response)\n\n if save_to:\n open(save_to, 'w', encoding='utf-8').write(r.text)\n return r\n\n","sub_path":"converter/requests_handler.py","file_name":"requests_handler.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"640650054","text":"import sys\nimport requests\nimport json\n\nif len(sys.argv) != 2:\n sys.exit(\"Usage: getListOfItemIdsFor.py numericItemId\")\nitem_id = sys.argv[1]\nprint(\"Querying wdq.wmflabs.org...\")\nitems = json.loads(requests.get(\"http://wdq.wmflabs.org/api?q=claim[227] and claim[31:\" + item_id + \"]\").text)['items']\nf = open(\"itemIdsForQ%s.txt\" % item_id, 'w')\ni = 0\nfor item in items:\n s = str(item) + ';'\n f.write(s)\n i += 1\n if i % 1000 == 0:\n print(str(i))\nf.close()","sub_path":"getListOfItemIdsFor.py","file_name":"getListOfItemIdsFor.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"419908917","text":"'''\n题目描述\n我们可以用2*1的小矩形横着或者竖着去覆盖更大的矩形。请问用n个2*1的小矩形无重叠地覆盖一个2*n的大矩形,总共有多少种方法?\n'''\n\n\n'''\n\n'''\ndef rectCover(self, number):\n if number == 1:\n return 1\n if number == 2:\n return 2\n ways = 0\n pre1 = 2\n pre2 = 1\n for i in range(2, number):\n ways = pre1 + pre2\n pre1, pre2 = ways, pre1\n return ways","sub_path":"DP/paveMatrix.py","file_name":"paveMatrix.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"501478839","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\n\ndef whiten(strain, interp_psd, dt):\n Nt = len(strain)\n freqs = np.fft.rfftfreq(Nt, dt)\n df = freqs[1] - freqs[0]\n\n # whitening: transform to freq domain, divide by asd, then transform back, \n # taking care to get normalization right.\n hf = np.fft.rfft(strain) * dt\n white_hf = hf / interp_psd(freqs)\n white_ht = np.fft.irfft(white_hf, n=Nt) * df \n return white_ht\n\n\nif __name__ == '__main__':\n\n from pycbc.waveform import get_td_waveform\n\n psddata = np.genfromtxt('ZERO_DET_high_P.txt')\n f = psddata[:,0]\n psd = psddata[:,1]\n interp_psd = interp1d(f, psd, fill_value=\"extrapolate\")\n\n hp, hc = get_td_waveform(approximant='SEOBNRv4',\n mass1=10.0, mass2=10.0,\n f_lower=20.0,\n delta_t=1.0/4096)\n\n t = np.array(hp.sample_times)\n dt = t[1] - t[0]\n hp = np.array(hp)\n\n hp_wh = whiten(hp, interp_psd, dt)\n\n plt.figure()\n plt.plot(t, hp_wh)\n plt.show()\n","sub_path":"pysrc/utils_old/whitening.py","file_name":"whitening.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"531928729","text":"import sys\nimport matplotlib.pyplot as plt\nregister = {\n \"000\": 0,\n \"001\": 0,\n \"010\": 0,\n \"011\": 0,\n \"100\": 0,\n \"101\": 0,\n \"110\": 0,\n \"111\": [0, 0, 0, 0]}\n\ndef memory(code):\n mem = []\n n = len(code)\n memlen = 256 - n\n for line in code:\n mem.append(line[0:16])\n for i in range(memlen):\n mem.append(\"0000000000000000\")\n return mem\n\n# Convert Binary Value Into 16 Bit\ndef binaryvalue(number):\n a = bin(number).replace(\"0b\", \"\")\n if (len(a) <= 16):\n d = \"0\" * (16 - len(a)) + a\n else:\n d = a[-16:]\n return d\n\ndef binaryToDecimal(n):\n return int(n, 2)\n\ndef memoryupdate(mem, val, location):\n bval = binaryvalue(val)\n mem[location] = bval\n return mem\n\nflags = \"0000000000000000\"\nans = []\nprc = \"00000000\" #Program Counter-->8 bits-->initialy pointing to 0 address\n\ndef output(pc, line, mem,flags): #identifies the instruction and calls the appropriate function\n address=\"10\"\n instr = line[0:5]\n line = line[5:16]\n halt = 0\n oldpc = pc\n\n if (instr == \"00000\"): #add\n flags = \"0000000000000000\"\n X1 = line[2:5]\n X2 = line[5:8]\n X3 = line[8:11]\n\n if (register[X2] + register[X3] > 65535):\n flags = \"0000000000001000\"\n x = register[X2] + register[X3]\n xbinary = binaryvalue(x)\n register[X1] = binaryToDecimal(xbinary)\n else:\n register[X1] = register[X2] + register[X3]\n elif (instr == \"00001\"): #Subtraction\n flags = \"0000000000000000\"\n X1 = line[2:5]\n X2 = line[5:8]\n X3 = line[8:11]\n if (register[X3] > register[X2]):\n register[X1] = 0\n flags = \"0000000000001000\"\n else:\n register[X1] = register[X2] - register[X3]\n\n elif (instr == \"00010\"): #mov reg1 $Imm\n flags = \"0000000000000000\"\n X1 = line[0:3]\n Imm = line[3:]\n register[X1] = binaryToDecimal(Imm)\n\n elif (instr == \"00011\"): #mov reg1 reg2\n X1 = line[5:8]\n X2 = line[8:]\n if(X2==\"111\"):\n m= binaryToDecimal(flags)\n register[X1]=m\n else: \n register[X1] = register[X2]\n flags = \"0000000000000000\"\n elif (instr == \"00100\"): #Load ld reg1 memaddr\n flags = \"0000000000000000\"\n X1 = line[0:3]\n memadr = line[3:11]\n address=line[3:11]\n memad = binaryToDecimal(memadr)\n bval = mem[memad]\n val = binaryToDecimal(bval)\n for i in register:\n if (i == X1):\n register[i] = val\n\n elif (instr == \"00101\"): #Store st reg1 memaddr\n flags = \"0000000000000000\"\n X1 = line[0:3]\n val = 0\n memadr = line[3:]\n address=line[3:11]\n location = binaryToDecimal(memadr)\n for i in register:\n if (i == X1):\n val = register[i]\n mem = memoryupdate(mem, val, location)\n\n elif (instr == \"00110\"): #Multiply\n flags = \"0000000000000000\"\n X1 = line[2:5]\n X2 = line[5:8]\n X3 = line[8:11]\n if (register[X2] * register[X3] > 65535):\n flags = \"0000000000001000\"\n x = register[X2] * register[X3]\n xbinary = binaryvalue(x)\n register[X1] = binaryToDecimal(xbinary)\n else:\n register[X1] = register[X2] * register[X3]\n\n elif (instr == \"00111\"): #Divide\n flags = \"0000000000000000\"\n X1 = line[5:8]\n X2 = line[8:11]\n register[\"000\"] = register[X1] // register[X2]\n register[\"001\"] = register[X1] % register[X2]\n\n elif (instr == \"01000\"): #Right Shift rs reg1 $imm\n flags = \"0000000000000000\"\n X1 = line[0:3]\n imm = int(line[3:11])\n register[X1] = register[X1] << imm\n\n elif (instr == \"01001\"): #Left Shift ls reg1 $imm\n flags = \"0000000000000000\"\n X1 = line[0:3]\n imm = int(line[3:11])\n register[X1] = register[X1] >> imm\n\n elif (instr == \"01010\"): #Exclusive OR\n flags = \"0000000000000000\"\n 
X1 = line[2:5]\n X2 = line[5:8]\n X3 = line[8:11]\n x=binaryvalue(register[X2]) \n y=binaryvalue(register[X3])\n z=''\n for i in range(16):\n a=str(int(x[i]))^str(int(y[i]))\n z+=a\n register[X1] = binaryToDecimal(z)\n\n elif (instr == \"01011\"): #Or\n flags = \"0000000000000000\"\n X1 = line[2:5]\n X2 = line[5:8]\n X3 = line[8:11]\n x=binaryvalue(register[X2]) \n y=binaryvalue(register[X3])\n z=''\n for i in range(16):\n a=str(int(x[i]))|str(int(y[i]))\n z+=a\n register[X1] = binaryToDecimal(z)\n\n elif (instr == \"01100\"): #And\n flags = \"0000000000000000\"\n X1 = line[2:5]\n X2 = line[5:8]\n X3 = line[8:11]\n x=binaryvalue(register[X2]) \n y=binaryvalue(register[X3])\n z=''\n for i in range(16):\n a=str(int(x[i]))&str(int(y[i]))\n z+=a\n register[X1] = binaryToDecimal(z)\n\n elif (instr == \"01101\"): #Invert\n flags = \"0000000000000000\"\n X1 = line[5:8]\n X2 = line[8:11]\n register[X1] = ~register[X2]\n\n elif (instr == \"01110\"): #Compare\n flags = \"0000000000000000\"\n X1 = line[5:8]\n X2 = line[8:11]\n if register[X1] == register[X2]:\n flags = \"0000000000000001\"\n if int(register[X2]) > int(register[X2]):\n flags = \"0000000000000010\"\n if int(register[X1]) < int(register[X2]):\n flags = \"0000000000000100\"\n\n elif (instr == \"01111\"): #UnconditionalJump\n memad = line[3:11]\n memval = binaryToDecimal(memad)\n pc = memval - 1 #-1 is done as in last lines we are doing pc=pc+1\n flags = \"0000000000000000\"\n\n elif (instr == \"10000\"): #Jump If Less Than\n if flags[\n 15] == \"1\": \n memad = line[3:]\n memval = binaryToDecimal(memad)\n pc = memval - 1 #-1 is done as in last lines we are doing pc=pc+1\n flags = \"0000000000000000\"\n #yaha pe jump\n\n elif (instr == \"10001\"): #Jump If Greater Than\n if flags[14] == \"1\":\n memad = line[3:]\n memval = binaryToDecimal(memad)\n pc = memval - 1 #-1 is done as in last lines we are doing pc=pc+1\n flags = \"0000000000000000\"\n\n elif (instr == \"10010\"): #Jump If Equal\n if flags[13] == \"1\":\n memad = line[3:]\n memval = binaryToDecimal(memad)\n pc = memval - 1 #-1 is done as in last lines we are doing pc=pc+1\n flags = \"0000000000000000\"\n elif (instr == \"10011\"): #Halt\n flags = \"0000000000000000\"\n halt = 1\n pc = pc + 1 #updation of program counter after fetching instruction\n #appending output\n\n prc = binaryvalue(oldpc)[8:16] #updating program counter(string)\n r0 = binaryvalue(register[\"000\"])\n r1 = binaryvalue(register[\"001\"])\n r2 = binaryvalue(register[\"010\"])\n r3 = binaryvalue(register[\"011\"])\n r4 = binaryvalue(register[\"100\"])\n r5 = binaryvalue(register[\"101\"])\n r6 = binaryvalue(register[\"110\"])\n klist = [prc, r0, r1, r2, r3, r4, r5, r6, flags]\n print(\" \".join(klist))\n\n prc = binaryvalue(pc)[8:16] #updating program counter(string)\n return halt, pc, mem, flags,address\n\ndef main():\n pc = 0\n code = []\n for i in sys.stdin:\n code.append(i)\n \n flags = \"0000000000000000\"\n halt = 0\n i = 0\n cycle = 0\n x=[]\n y=[]\n address=\"10\"\n mem = memory(code)\n while (halt == 0):\n i = pc\n line = code[i]\n x.append(cycle)\n y.append(pc)\n halt, pc, mem, flags, address = output(pc, line, mem, flags)\n if address!=\"10\":\n x.append(cycle)\n ad=binaryToDecimal(address)\n y.append(ad)\n cycle = cycle + 1 \n for line in mem: #memory dump\n print(line)\n plt.scatter(x, y)\n plt.show()\nmain()","sub_path":"SimpleSimulator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"381425874","text":"import warnings\nfrom classytags.helpers import InclusionTag\nfrom django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\nclass CookielawBanner(InclusionTag):\n \"\"\"\n Displays cookie law banner only if user has not dismissed it yet.\n \"\"\"\n\n template = 'cookie/banner.html'\n\n def render_tag(self, context, **kwargs):\n template_filename = self.get_template(context, **kwargs)\n if 'request' not in context:\n warnings.warn('No request object in context. '\n 'Are you sure you have django.core.context_processors.request enabled?')\n return ''\n\n elif context['request'].COOKIES.get('cookielaw_accepted', False):\n return ''\n\n elif 'cookielaw_declined' in context['request'].COOKIES.values():\n return ''\n\n data = self.get_context(context, **kwargs)\n\n return render_to_string(template_filename, data, getattr(context, 'request', None))\n\n\nregister.tag(CookielawBanner)\n","sub_path":"cookie/templatetags/cookielaw_tags.py","file_name":"cookielaw_tags.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"411052665","text":"#! python3\n# --- IMPORTS --------------------------------------------------------------------------------------------------------\nimport os\n\n\n# --- FUNCTIONS ------------------------------------------------------------------------------------------------------\ndef count_chs(_string):\n count = 0\n for val in _string:\n count += 1 if val in 'abcdefghijklmnopqrstuvwxyz1234567890' else 0\n\n return count\n\n\ndef list2string(_data):\n\n # unite data per row\n rows = []\n\n for record in _data:\n tab1 = 3 if count_chs(record[0]) <= 6 else 2\n\n if record[2] != 'MX':\n new_string = record[0] + '\\t'*tab1 + record[1] + '\\t' + record[2] + '\\t\\t' + record[3]\n elif record[2] == 'MX':\n new_string = record[0] + '\\t' * tab1 + record[1] + '\\t' + record[2] + ' ' + record[3] + '\\t\\t' + record[4]\n\n rows.append(new_string)\n\n # luckily default sort is exactly what I want\n rows.sort()\n\n # add space every after type 'TXT'\n list_of_str = []\n for row in rows:\n list_of_str.append(row)\n if 'TXT' in row:\n list_of_str.append('')\n\n # unite rows\n big_str = '\\n'.join(list_of_str)\n\n return big_str\n\n\n# --- MAIN CODE ------------------------------------------------------------------------------------------------------\n# read file\nrel_path = os.path.join('..', 'data', 'records.txt')\nabs_path = os.path.abspath(rel_path)\nf = open(abs_path)\ndata = f.read()\n\n# get data from file\nlines = data.split('\\n')\nlines_data = []\n\n# separate values in every line\nfor i, line in enumerate(lines):\n line = line.strip()\n if not line:\n continue\n\n # save only non-empty characters from each line\n line_values = []\n value = ''\n for ch in line:\n\n valid = False if ch == ' ' or ch == '\\t' else True\n\n if not valid and not value:\n pass\n\n elif not valid and value:\n line_values.append(value)\n value = ''\n continue\n\n elif valid:\n value += ch\n\n # save last value(always unsaved)\n else:\n line_values.append(value)\n lines_data.append(line_values)\n\n# adding TXT registers\nlines_data_txt = []\nfor line in lines_data:\n\n rtype = line[2]\n if rtype != 'MX':\n lines_data_txt.append(line)\n\n elif rtype == 'MX':\n name = line[0]\n lines_data_txt.append(line)\n lines_data_txt.append([name, 'IN', 'TXT', '\\\"v=spf1 a mx -all\\\"'])\n\n# save to file\noutput = list2string(lines_data_txt) # replace list for a formatted string\nrel_path = os.path.join('..', 'data', 'output.txt')\nabs_path = os.path.abspath(rel_path)\nf = open(abs_path, \"w\")\nf.write(output)\nf.close()\n","sub_path":"005 dnsrecords2/code/add_txt.py","file_name":"add_txt.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"360422870","text":"import asyncio\nfrom unittest.mock import MagicMock\n\nimport pytest\n\nfrom aioesphomeapi._frame_helper import APINoiseFrameHelper, APIPlaintextFrameHelper\nfrom aioesphomeapi.core import BadNameAPIError, InvalidEncryptionKeyAPIError\nfrom aioesphomeapi.util import varuint_to_bytes\n\nPREAMBLE = b\"\\x00\"\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"in_bytes, pkt_data, pkt_type\",\n [\n (PREAMBLE + varuint_to_bytes(0) + varuint_to_bytes(1), b\"\", 1),\n (\n PREAMBLE + varuint_to_bytes(192) + varuint_to_bytes(1) + (b\"\\x42\" * 192),\n (b\"\\x42\" * 192),\n 1,\n ),\n (\n PREAMBLE + varuint_to_bytes(192) + varuint_to_bytes(100) + (b\"\\x42\" * 192),\n (b\"\\x42\" * 192),\n 100,\n ),\n (\n PREAMBLE + varuint_to_bytes(4) + varuint_to_bytes(100) + (b\"\\x42\" * 4),\n (b\"\\x42\" * 4),\n 100,\n ),\n (\n PREAMBLE\n + varuint_to_bytes(8192)\n + varuint_to_bytes(8192)\n + (b\"\\x42\" * 8192),\n (b\"\\x42\" * 8192),\n 8192,\n ),\n (\n PREAMBLE + varuint_to_bytes(256) + varuint_to_bytes(256) + (b\"\\x42\" * 256),\n (b\"\\x42\" * 256),\n 256,\n ),\n (\n PREAMBLE + varuint_to_bytes(1) + varuint_to_bytes(32768) + b\"\\x42\",\n b\"\\x42\",\n 32768,\n ),\n (\n PREAMBLE\n + varuint_to_bytes(32768)\n + varuint_to_bytes(32768)\n + (b\"\\x42\" * 32768),\n (b\"\\x42\" * 32768),\n 32768,\n ),\n ],\n)\nasync def test_plaintext_frame_helper(in_bytes, pkt_data, pkt_type):\n for _ in range(5):\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APIPlaintextFrameHelper(on_pkt=_packet, on_error=_on_error)\n\n helper.data_received(in_bytes)\n\n pkt = packets.pop()\n type_, data = pkt\n\n assert type_ == pkt_type\n assert data == pkt_data\n\n\n@pytest.mark.asyncio\nasync def test_noise_frame_helper_incorrect_key():\n \"\"\"Test that the noise frame helper raises InvalidEncryptionKeyAPIError on bad key.\"\"\"\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()\n\n\n@pytest.mark.asyncio\nasync def test_noise_incorrect_name():\n \"\"\"Test we raise on bad name.\"\"\"\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n 
expected_name=\"wrongname\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(BadNameAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(BadNameAPIError):\n await helper.perform_handshake()\n","sub_path":"tests/test__frame_helper.py","file_name":"test__frame_helper.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"392458756","text":"import hashlib\nimport itertools\n\ndef memoize(fn):\n answers = {}\n def fn_(*args):\n if args not in answers:\n answers[args] = fn(*args)\n return answers[args]\n return fn_\n\n@memoize\ndef hash(index, depth=0):\n salt = \"ihaygndm\"\n x = hashlib.md5(salt + str(index)).hexdigest()\n for i in range(depth):\n x = hashlib.md5(x).hexdigest()\n return x\n\ndef produces_key(index, depth=0):\n a = hash(index, depth)\n for i in range(len(a)-2):\n if a[i] == a[i+1] == a[i+2]:\n triple = a[i]\n break\n else:\n return False\n for i in range(1, 1001):\n b = hash(index+i, depth)\n if triple*5 in b:\n return True\n\ndef get_nth(n, depth=0):\n found = 0\n for i in itertools.count():\n if produces_key(i, depth):\n found += 1\n if found == n:\n return i\n\nprint(get_nth(64, 0))\nprint(get_nth(64, 2016))\n","sub_path":"day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"162070338","text":"\"\"\"csdn_net/user/index.py\n\nTODO:\n -[o] update to suit csdn\n\"\"\"\nimport sys, os\nfrom abc import ABC, abstractmethod\n\n#\n# Project path\n#\n''' no need when model at root-dir program\ntry:\n _cwd = os.getcwd()\n _proj_abs_path = _cwd[0:_cwd.find(\"super-spider\")]\n if True: # hole project as a package path\n _package_path = os.path.join(_proj_abs_path, \"\")\n else:\n _package_path = os.path.join(_proj_abs_path, \"\")\n\n if _package_path not in sys.path:\n sys.path.append(_package_path)\n\n # import \nexcept Exception:\n import traceback; traceback.print_exc(); # -[o] fix later by using argv\n sys.exit(1)\n'''\n\n\n################################\n# sites/csdn_net/__init__.py #\n################################\nfrom crawllib import SubjectCrawl\nfrom crawllib import VisitCrawl\n\n\nclass CommonVisitCrawl(VisitCrawl, ABC):\n def __init__(self):\n VisitCrawl.__init__(self)\n\n def _setup_browser(self, browser_type=None):\n VisitCrawl._setup_browser(self, browser_type=None)\n\n def _free_browser(self):\n VisitCrawl._free_browser(self)\n\n @abstractmethod\n def _gen_url(self):\n pass\n\n\n# from sites.jobbole_com import CommonVisitCrawl # /\\ there it is!\n# from GUI.web.selfupdating import update_jobbole_user_id_from_db\n\n# from GUI.web import django_server_port\n\n\n\"\"\"\nclass Subject_CSDN_UserInfo(CommonVisitCrawl, SubjectCrawl):\n def __init__(self, user_id, browser_type=None):\n CommonVisitCrawl.__init__(self)\n SubjectCrawl.__init__(self)\n self.user_id = user_id\n self.browser_type = browser_type\n\n def __str__(self):\n return \"{}: {}\".format(\n type(self).__name__, self.user_id)\n\n # -[o] check data define\n class data:\n # model = ...\n # -[o] update those \\/\n fields = (\"name\", \"url\", \"follow_num\", \"fan_num\", )\n executions = {\n 'name': {'xpath': \"//div[@class='member-profile box']//span[@class='profile-title']//a\",\n 'attribute': \"text\", },\n 'url': {'xpath': \"//div[@class='member-profile box']//span[@class='profile-title']//a\",\n 'func': \"get_attribute\", 'vargs': (\"href\", ), },\n 'follow_num': {'xpath': \"//div[@class='profile-follow']/a\",\n 'attribute': \"text\", },\n 'fan_num': {'xpath': \"//div[@class='profile-follow'][2]/a\",\n 'attribute': \"text\", },\n }\n\n def execute_name(self):\n elem = self.browser.find_element_by_xpath(\n self.data.executions['name']['xpath'])\n if \"attribute\" in self.data.executions['name'].keys():\n ret = getattr(elem, self.data.executions['name']['attribute'])\n elif \"func\" in self.data.executions['name'].keys():\n func = getattr(elem, self.data.executions['name']['func'])\n ret = func(*self.data.executions['name']['vargs'])\n return ret\n\n def _gen_url(self):\n # -[o] update those \\/\n self.url = \"http://www.jobbole.com/members/\" + self.user_id + \"/\"\n return self.url\n\n def _monitor(self):\n SubjectCrawl._monitor(self, browser_type=self.browser_type)\n\n def run_update_from_db(self):\n print(\"you call {}@{} run_update_from_db\".format(\n self.user_id, type(self).__name__\n ))\n # -[o] update this function! 
\\/\n status_code, text = update_jobbole_user_id_from_db(self.user_id)\n # print(\"\\nprint update_jobbole_user_id_from_db:\", _)\n # print(\"[debug] \", )\n\n return status_code, text\n\"\"\"\n\n\nclass Subject_CSDN_UserInfoVisual(CommonVisitCrawl, SubjectCrawl):\n def __init__(self, user_id, browser_type=None):\n CommonVisitCrawl.__init__(self)\n SubjectCrawl.__init__(self)\n self.user_id = user_id\n self.browser_type = browser_type\n\n def __str__(self):\n return \"{}: {}\".format(\n type(self).__name__, self.user_id)\n\n class data:\n # model = ...\n fields = (\"originality\", \"reprint\",\n \"fans\", \"follow\", \"likes\", \"comments\",\n # \"csdnlevel\",\n \"visitors\",\n # \"intergration\",\n \"rank\", )\n executions = {\n 'originality': {'xpath': \"//div[@class='tab_page my_tab_page']/ul[@class='mod_my_t clearfix']//li[3]/span\",\n 'attribute': \"text\", },\n 'reprint': {'xpath': \"//div[@class='tab_page my_tab_page']/ul[@class='mod_my_t clearfix']//li[4]/span\",\n 'attribute': \"text\", },\n # 'func': \"get_attribute\", 'vargs': (\"href\", ), },\n 'fans': {'xpath': \"//div[@class='my_fans js_fans_att clearfix']/ul[@class='my_fans_bar']//li[1]/em\",\n 'attribute': \"text\", },\n 'follow': {'xpath': \"//div[@class='my_fans js_fans_att clearfix']/ul[@class='my_fans_bar']//li[2]/em\",\n 'attribute': \"text\", },\n 'likes': {'xpath': \"//div[@class='tab_page my_tab_page']/ul[@class='mod_my_t clearfix']//li[7]/span\",\n 'attribute': \"text\", },\n 'comments': {'xpath': \"//div[@class='tab_page my_tab_page']/ul[@class='mod_my_t clearfix']//li[6]/span\",\n 'attribute': \"text\", },\n 'visitors': {'xpath': \"//div[@class='tab_page my_tab_page']/ul[@class='mod_my_t clearfix']//li[2]/span\",\n 'attribute': \"text\", },\n 'rank': {'xpath': \"//div[@class='tab_page my_tab_page']/ul[@class='mod_my_t clearfix']//li[5]/span\",\n 'attribute': \"text\", },\n }\n\n def execute_name(self):\n elem = self.browser.find_element_by_xpath(\n self.data.executions['name']['xpath'])\n if \"attribute\" in self.data.executions['name'].keys():\n ret = getattr(elem, self.data.executions['name']['attribute'])\n elif \"func\" in self.data.executions['name'].keys():\n func = getattr(elem, self.data.executions['name']['func'])\n ret = func(*self.data.executions['name']['vargs'])\n return ret\n\n def _gen_url(self):\n self.url = \"https://me.csdn.net/\" + self.user_id\n return self.url\n\n def _monitor(self):\n SubjectCrawl._monitor(self, browser_type=self.browser_type)\n\n \"\"\"\n def run_update_from_db(self):\n print(\"you call {}@{} run_update_from_db\".format(\n self.user_id, type(self).__name__\n ))\n # -[o] update this function! \\/\n status_code, text = update_jobbole_user_id_from_db(self.user_id)\n # print(\"\\nprint update_jobbole_user_id_from_db:\", _)\n # print(\"[debug] \", )\n\n return status_code, text\n \"\"\"\n","sub_path":"old-arch/super-spider/userindex.py","file_name":"userindex.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"198099959","text":"#coding: utf8\n\nannee = [format(annee) for annee in range(30000, 100000)]\n\nanne = [\"{:05d}\".format(anne) for anne in range(0, 18000)]\n\nprint(annee, anne)\n\n\n# Cette solution fonctionne bien. Elle imprime successivement deux listes contenant tous les numéros de permis possibles dans l'intervalle qu'on cherche.\n# Dans une perspective de journalisme de données, il est encore mieux de réunir l'ensemble de ces nombres dans une seule liste:\n\nmedecins = annee + anne\n\n# Puis, de créer une boucle qui traite chacun des nombres (les numéros de permis possibles) un à un:\n\nfor medecin in medecins:\n\tprint(medecin)","sub_path":"devoir1JHR.py","file_name":"devoir1JHR.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"155763159","text":"import json\nimport datetime as dt\nimport re\nfrom collections import defaultdict\nimport heapq\n\nclass State:\n \"\"\"Encapsulate the snapshot of what's where.\"\"\"\n def __init__(self, floor_dict, elevator_loc=1, counter=0):\n self.floors = {}\n self.counter = counter\n self.elevator_loc = elevator_loc\n for key in floor_dict:\n self.floors[key] = floor_dict[key][:]\n self.sort()\n \n # overload comparison operators! then use heapq's natural ordering \n\n def __gt__(self, other):\n return self.counter + self.floor_score > other.counter + self.floor_score\n \n def __lt__(self, other):\n return self.counter + self.floor_score < other.counter + self.floor_score\n \n def __ge__(self, other):\n return self.counter + self.floor_score >= other.counter + self.floor_score\n \n def __le__(self, other):\n return self.counter + self.floor_score <= other.counter + self.floor_score\n \n def __eq__(self, other):\n # the floors and the elevator have to be the same\n if self.elevator_loc != other.elevator_loc:\n return False\n if self.floors != other.floors:\n return False\n return True\n\n def __hash__(self):\n return hash(self.string_rep)\n\n def get_copy(self):\n d = {1:[], 2:[], 3:[], 4:[]}\n for i in [1,2,3,4]:\n d[i] = self.floors[i][:]\n return d\n\n def sort(self):\n score = 0\n for floor in self.floors:\n self.floors[floor].sort()\n score += 16**(4-floor) * len(self.floors[floor])\n self.floor_score = score\n self.string_rep = self.stringify()\n \n def add_object_to_floor(self, obj_type, floor):\n self.floors[floor].append(obj_type)\n\n def remove_object_from_floor(self, obj_type, floor):\n if obj_type in self.floors[floor]:\n self.floors[floor].pop(self.floors[floor].index(obj_type))\n\n def move_object_between_floors(self, obj_type, floor_1, floor_2):\n if obj_type == None:\n return\n self.remove_object_from_floor(obj_type, floor_1)\n self.add_object_to_floor(obj_type, floor_2)\n\n def is_finished(self):\n for floor in [1,2,3]:\n if self.floors[floor]:\n return False\n return True\n\n def get_counter(self):\n return self.counter\n\n def stringify(self):\n s = json.dumps(self.floors)\n return json.dumps(self.floors) + '-' + str(self.elevator_loc)\n\n def print(self):\n for floor in self.floors:\n print(floor, self.floors[floor])\n\n def get_candidates(self):\n # basically, find where the elevator is and return that floor\n floor_contents = self.floors[self.elevator_loc][:]\n floor_contents.append(None)\n return floor_contents\n\n\n def check_elevator_ride(self, o1, o2):\n return True\n if (o1 and not o2) or (o2 and not o1):\n # only one item, we're good\n return True\n n_generators = sum(['G-' in x for x in [o1, o2]]) \n n_chips = sum(['M-' in x for x in [o1, o2]]) \n # check if elevator ride is ok\n if n_generators == 1 and n_chips == 1:\n if o1.split('-')[1] != o2.split('-')[1]:\n return False\n return True\n\n def check_objs(self, objs):\n # make sure nothing is getting fried.\n chips = []\n generators = []\n for o in objs:\n if 'M-' in o:\n chips.append(o.split('-')[1])\n else:\n generators.append(o.split('-')[1])\n if len(generators) == 0:\n # no generators, everything is fine\n return True\n for chip in chips:\n if chip not in generators:\n # there is no generator for this chip, so not allowd\n return False\n return True\n\n def check_new_floor(self, floor, o1, o2):\n new_floor = self.floors[floor][:]\n [new_floor.append(x) for x in [o1, o2] if x]\n return self.check_objs(new_floor) \n\n def check_old_floor(self, floor, o1, o2):\n old_floor = 
self.floors[floor][:]\n for o in [o1, o2]:\n if o in old_floor:\n old_floor.pop(old_floor.index(o))\n return self.check_objs(old_floor)\n\n\n def check_is_allowed(self, floor_1, floor_2, o1, o2):\n # check if elevator and objects are on floor_1\n if o1 and o1 not in self.floors[floor_1]:\n return False\n # need at least one object to move\n if not o1 and not o2:\n return False\n # can only move one floor at a time\n if abs(floor_1 - floor_2) != 1:\n return False\n if not self.check_elevator_ride(o1, o2):\n return False\n \n if not self.check_new_floor(floor_2, o1, o2):\n return False\n\n if not self.check_old_floor(floor_1, o1, o2):\n return False\n\n return True\n\n\ndef get_inputs(test=False):\n path = 'inputs/day11.txt'\n if test:\n path = 'inputs/day11.test.txt'\n with open(path) as f:\n lines = [x.strip() for x in f.readlines()]\n \n floor_map = {'first':1, 'second':2, 'third':3, 'fourth':4}\n d = {u:[] for u in floor_map.values()}\n objs = []\n for line in lines:\n if 'nothing relevant' in line:\n continue\n floor = floor_map[line.split(' ')[1]]\n items = line.split('contains ')[1]\n for part in items.split('and'):\n for obj in part.split(','):\n o = obj.strip()\n if ' ' in o:\n typ = o.split(' ')[-1][0].upper()\n element = o.split(' ')[1][0:2].capitalize()\n obj_to_add = '{}-{}'.format(typ, element)\n objs.append(obj_to_add)\n d[floor].append(obj_to_add)\n \n state = State(d, 1, 0)\n return state, objs\n\ndef get_neighbors(state):\n # unused, incomplete helper; process_state builds the neighbor moves inline\n elevator_loc = state.elevator_loc\n candidate_floors = [x for x in [elevator_loc - 1, elevator_loc + 1] if x > 0 and x < 5]\n\n\n\ndef process_state(queue, visited, state, finished):\n if state.is_finished():\n heapq.heappush(finished, state.get_counter())\n print('huzzah', state.get_counter())\n return\n if state.get_counter() > finished[0]:\n return\n visited[state] = state.get_counter()\n candidates = state.get_candidates()\n floor_options = [x for x in [state.elevator_loc-1, state.elevator_loc+1] if x > 0 and x < 5]\n while candidates:\n o1 = candidates.pop()\n for o2 in candidates:\n for new_floor in floor_options:\n if state.check_is_allowed(state.elevator_loc, new_floor, o1, o2):\n new_state = State(state.get_copy(), new_floor, state.get_counter() + 1)\n new_state.move_object_between_floors(o1, state.elevator_loc, new_floor)\n new_state.move_object_between_floors(o2, state.elevator_loc, new_floor)\n new_state.sort()\n if new_state not in visited and new_state.get_counter() < finished[0]:\n visited[new_state] = new_state.get_counter()\n heapq.heappush(queue, new_state)\n elif new_state in visited:\n if new_state.get_counter() < visited[new_state]:\n visited[new_state] = new_state.get_counter()\n heapq.heappush(queue, new_state)\n\n\ndef do_part(initial_state):\n initial_state.sort()\n unvisited_dict = {}\n unvisited_queue = []\n visited_states = defaultdict(int)\n visited_states['zomg'] = 0\n queue = []\n heapq.heappush(queue, initial_state)\n finished = []\n heapq.heappush(finished, 100)\n while queue:\n #print('queue length is', len(queue), 'visited length is', len(visited_states))\n new_state = heapq.heappop(queue)\n if new_state.get_counter() < finished[0]:\n process_state(queue, visited_states, new_state, finished)\n return finished\n\ndef part2():\n initial_state, objs = get_inputs(False)\n initial_state.add_object_to_floor('El-M', 1)\n initial_state.add_object_to_floor('El-G', 1)\n initial_state.add_object_to_floor('Di-M', 1)\n initial_state.add_object_to_floor('Di-G', 1)\n initial_state.sort()\n finished = do_part(initial_state)\n 
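# note: do_part returns a min-heap of completed-solution step counts (seeded with 100 as an upper bound), so min(finished) below is the best answer found\n 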
print('Part 2: Least amount of steps to bring everything to 4th floor is {}'.format(min(finished))) \n\ndef part1():\n initial_state, objs = get_inputs(False)\n finished = do_part(initial_state)\n print('Part 1: Least amount of steps to bring everything to 4th floor is {}'.format(min(finished))) \n #print(visited_states)\n\ndef main():\n part1()\n part2()\n\n\nif __name__ == '__main__':\n begin = dt.datetime.now()\n main()\n diff_time = dt.datetime.now() - begin\n print('That took {:.3f} seconds'.format(diff_time.seconds + 1e-6*diff_time.microseconds))\n","sub_path":"2016/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"106954886","text":"from django.test import TestCase\nfrom datetime import date\nfrom golfscores.models import GolfCourse, Hole, Game, Score\n\n# Create your tests here.\nclass GolfScoreIntegrationTests(TestCase):\n def setUp(self):\n new_course = GolfCourse.objects.create(course_name='Odana Hills', \n course_city='Madison', \n course_state='WI')\n Hole.objects.create(course=GolfCourse.objects.get(id=1),\n hole_number=1,\n yards=343,\n handicap=13,\n par=4)\n \n def test_record_hole(self):\n \"\"\"\n Check that I can record my score if I played golf on Odana Hills today and parred hole 1\n \"\"\"\n selected_course = GolfCourse.objects.get(course_name='Odana Hills')\n selected_hole = Hole.objects.get(course=selected_course,\n hole_number=1)\n new_game = Game.objects.create(course=selected_course,\n played_date=date.today())\n new_score = Score.objects.create(hole=selected_hole,\n score=4,\n game=new_game)\n self.assertEquals(4, Score.objects.get(game=new_game, \n hole=selected_hole).score) ","sub_path":"golfscores/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"24663748","text":"''' Tag using turtles'''\nimport turtle #import turtle library\nimport random #import random library\nimport math #import math library\n\n'''global variables'''\nscreen = turtle.Screen() #get the screen\nsize = screen.screensize() #get the screen size (returns tuple (width, height))\nwidth = size[0] #get the width of the screen\nheight = size[1] #get the height of the screen\nplayer = turtle.Turtle() #create the player\ncomputer = turtle.Turtle() #create the computer\ngame_over = False #the game is not over\n\n'''player moves forward method'''\ndef forward():\n if not game_over:\n player.forward(5)\n\n'''player rotates left method'''\ndef left():\n if not game_over:\n player.left(5)\n\n'''player rotates right method'''\ndef right():\n if not game_over:\n player.right(5)\n\n'''player moves backrward method'''\ndef backward():\n if not game_over:\n player.backward(5)\n\nplayer.shape(\"turtle\") #choose shape of your player\nplayer.penup() #lift the pen up so that it doesn't draw a line\ncomputer.shape(\"turtle\") #choose shape of your computer\ncomputer.penup() #left the pen up so that it doesn;t draw a line\n\ncolor = input(\"What color would you like your turtle to be?\") #pick a color for the player's turtle\nplayer.color(color) #change the color of the turtle\n\ncomputer_start_x = random.randint(-(width/2), (width/2)) #pick a random number on the screen for the computer's starting x position\ncomputer_start_y = random.randint(-(height/2), (height/2)) #pick a random number on the screen for the computer's starting y position\ncomputer.goto(computer_start_x, computer_start_y) #make the computer go to its starting position\n\nscreen.onkeypress(forward, \"Up\") #make it so that the player moves forward when the up arrow is pressed\nscreen.onkeypress(left, \"Left\") #make it so that the player rotates left when the left arrow is pressed\nscreen.onkeypress(right, \"Right\") #make it so that the player rotates right when the right arrow is pressed\nscreen.onkeypress(backward, \"Down\") #make it so that the player moves backward when the down arrow is pressed\nscreen.listen() #make it so that the screen listens for key presses\n\nwhile not game_over: #repeat until the game_over is true\n player_x = player.xcor() #get player's x position\n player_y = player.ycor() #get player's y position\n\n computer_x = computer.xcor() #get computer's x position\n computer_y = computer.ycor() #get computer's y position\n\n if (int(player_x) == int(computer_x) and (int(player_y) == int(computer_y))): #check if the player's x position is equal to the computer's x positon and the player's y position is equal computer's y position\n game_over = True #set game_over to True since the computer caught the player\n\n distance_x = player_x - computer_x #find the distance between player's x position and computer's x position\n distance_y = player_y - computer_y #find the distance betweem player's y position and computer's y position \n\n radians = math.atan2(distance_y, distance_x) #find the angle in radians of the arctan of distance_y/distance_x\n angle = math.degrees(radians) #convert to degrees\n\n if not game_over: #check if the game is not over\n computer.setheading(angle) #set the computer's angle to the angle\n computer.forward(1) #move forward\n \n\n \n\n \n\n \n \n \n\n","sub_path":"Python/Projects Involving AI/tag AI.py","file_name":"tag AI.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"195735163","text":"import numpy as np\n\ndef peak_ratio_mask(ir_map, wave1, wave2, bg_threshold=1e-2, peak_threshold=0.05,operator = '<', delta=5):\n \"\"\"\n Build a spatial mask on the basis of peak ratios.\n Data need to be in transmission mode.\n\n :param ir_map: image cube\n :param wave1: first wavenumber\n :param wave2: second wavenumber\n :param bg_threshold: threshold of background\n :param peak_threshold: threshold of peak\n :param operator: > or <\n :param delta: selects band size around provided peak values.\n :return:\n \"\"\"\n sel1 = (ir_map.wavenumbers > wave1-delta) & (ir_map.wavenumbers < wave1+delta)\n sel2 = (ir_map.wavenumbers > wave2-delta) & (ir_map.wavenumbers < wave2+delta)\n map1 = np.mean( ir_map.imageCube[:,:,sel1], axis=2 )\n map2 = np.mean( ir_map.imageCube[:,:,sel2], axis=2 )\n\n mask1 = map1 > bg_threshold\n mask2 = map2 > bg_threshold\n bg_mask = mask1*mask2\n \n\n fin = map1 / map2\n if operator == '<':\n fin = fin < peak_threshold\n if operator == '>':\n fin = fin > peak_threshold\n fin = fin & bg_mask\n mask = np.zeros( ir_map.imageCube.shape[0:2] )\n mask[fin] = 1.0 \n return mask\n\n\n\n \n\n\n","sub_path":"lbl_ir/lbl_ir/tasks/preprocessing/spatial_mask.py","file_name":"spatial_mask.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"179361939","text":"\r\n\r\n# ----------------------------------------\r\n# 데이터 생성\r\n\r\nimport mglearn\r\nfrom sklearn.datasets import make_blobs\r\nimport matplotlib.pyplot as plt\r\n\r\n# 세 개의 클래스를 가진 간단한 데이터셋 생성\r\nX, y = make_blobs(random_state=42)\r\n\r\n# ----------------------------------------\r\n# 세 개의 클래스를 가진 2차원 데이터셋의 산점도 그리기\r\n\r\nimport matplotlib as mpl\r\nmpl.rcParams['axes.unicode_minus'] = False\r\n\r\n# 세 개의 클래스를 가진 2차원 데이터셋의 산점도 그리기\r\nmglearn.discrete_scatter(X[:, 0], X[:, 1], y)\r\nplt.xlabel(\"특성 0\")\r\nplt.ylabel(\"특성 1\")\r\nplt.legend([\"클래스 0\", \"클래스 1\", \"클래스 2\"])\r\n\r\nplt.show()\r\n\r\n\r\n\r\n# ----------------------------------------\r\n# LinearSVC 모델 생성\r\n# 선형 서포트 벡터 머신(support vector machine)알고리즘을 구현\r\n# 데이터셋으로 LinearSVC 분류기를 훈련해보겠습니다.\r\nfrom sklearn.svm import LinearSVC\r\n\r\nlinear_svm = LinearSVC().fit(X, y)\r\nprint(\"계수 배열의 크기: \", linear_svm.coef_.shape)\r\nprint(\"절편 배열의 크기: \", linear_svm.intercept_.shape)\r\n# 계수 배열의 크기: (3, 2)\r\n# 절편 배열의 크기: (3,)\r\n\r\n# 결과를 보면\r\n# coef_ 배열의 크기는 (3, 2)입니다. \r\n# coef_의 행은 3개의 클래스에 각각 대응하는 계수 벡터를 담고 있으며, \r\n# 열은 각 특성에 따른 계수 값(이 데이터셋에서는 2 개)을 가지고 있습니다.\r\n# intercept_는 각 클래스의 절편을 담은 1차원 벡터를 가지고 있습니다.\r\n\r\n\r\n\r\n# ----------------------------------------\r\n# 세 개의 이진 분류기가 만드는 경계 시각화하기\r\nimport numpy as np\r\n\r\nmglearn.discrete_scatter(X[:, 0], X[:, 1], y)\r\nline = np.linspace(-15, 15)\r\nfor coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_,\r\n mglearn.cm3.colors):\r\n plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)\r\n plt.ylim(-10, 15)\r\n plt.xlim(-10, 8)\r\nplt.xlabel(\"특성 0\")\r\nplt.ylabel(\"특성 1\")\r\nplt.legend(['클래스 0', '클래스 1', '클래스 2', '클래스 0 경계', '클래스 1 경계',\r\n '클래스 2 경계'], loc=(1.01, 0.3))\r\n\r\nplt.show()\r\n\r\n# 결과를 보면\r\n# 훈련 데이터의 클래스 0에 속한 모든 포인트는 클래스 0을 구분하는 직선 위에, 즉 이진 분류기가 만든 클래스 0 지역에 있습니다.\r\n# 그런데 클래스 0에 속한 데이터는 클래스 2를 구분하는 직선 위, 즉 클래스 2의 이진 분류기에 의해 나머지로 분류됩니다.\r\n# 또한 클래스 0에 속한 데이터는 클래스 1을 구분하는 직선 왼쪽, 즉 클래스 1의 이진 분류기에 의해서도 나머지로 분류되었습니다. \r\n# 그러므로 이 영역의 어떤 데이터든 최종 분류기는 클래스 0으로 분류되게 됩니다\r\n# (클래스 0 분류 신뢰도 공식의 결과는 0보다 크고 다른 두 클래스의 경우는 0보다 작을 것입니다).\r\n\r\n# 중앙의 삼각형 영역은 (세 분류기가 모두 나머지로 분류했습니다.)\r\n# 정답은 분류 공식의 결과가 가장 높은 클래스입니다. 즉 가장 가까운 직선의 클래스가 될 것입니다.\r\n\r\n# ----------------------------------------\r\n# Step 5: 세 개의 일대다 분류기가 만든 다중 클래스 결정 경계 시각화\r\n\r\n# 2차원 평면의 모든 포인트에 대한 예측 결과를 보여주기\r\nmglearn.plots.plot_2d_classification(linear_svm, X, fill=True, alpha=.7)\r\nmglearn.discrete_scatter(X[:, 0], X[:, 1], y)\r\nline = np.linspace(-15, 15)\r\nfor coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_,\r\n mglearn.cm3.colors):\r\n plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)\r\nplt.legend(['클래스 0', '클래스 1', '클래스 2', '클래스 0 경계', '클래스 1 경계',\r\n '클래스 2 경계'], loc=(1.01, 0.3))\r\nplt.xlabel(\"특성 0\")\r\nplt.ylabel(\"특성 1\")\r\n\r\n\r\nplt.show()","sub_path":"PythonAnalysis/src/ch-SupervisedLearning/LogisticRegression/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"176843889","text":"\n# 정수 = \"1234\"\n# 1 == 정수[0]\n# 2 == 정수[1]\n# 3 == 정수[2]\n# 4 == 정수[3]\n\n# 작업 순서\n# 1. 숫자로만 이루어진 문자열을 입력 받는다.\n# 2. 문자열 길이를 구한다. len() 함수 사용\n# 3. 문자열의 0부터 정수길이-1 까지 1씩 증가시면서\n# 3-1. 문자 한개를 꺼내 정수로 변환\n# 3-2. sum + 정수 를 한다.\n\n# 1. 숫자로만 이루어진 문자열을 입력 받는다.\n정수문자열 = input(\"정수 입력\") # \"1295\"를 입력했다고 가정.\n\n# 2. 문자열 길이를 구한다. len() 함수 사용\n문자열길이 = len(정수문자열) # 4\n\n합계 = 0\nfor i in range(0, 문자열길이, 1): # 3. 0부터 문자열길이-1 까지 1씩 증가시면서\n 문자 = 정수문자열[i] # 3-1. 문자 한개를 꺼내 정수로 변환. 페이지 51쪽 참조.\n x = int(문자) # 문자를 정수로 바꾼다.\n 합계 = 합계 + x # 3-2. sum + 정수 를 한다.\n\nmsg = \"자리수의 합은 %s 입니다\" % (합계)\nprint(msg)\n","sub_path":"python20200322-master/class_Python기초/py08반복문/py08_15_자리수의합.py","file_name":"py08_15_자리수의합.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"601950237","text":"class ExponentialMovingAverage(object):\r\n '''\r\n 权重滑动平均,对最近的数据给予更高的权重\r\n uasge:\r\n # 初始化\r\n ema = EMA(model, 0.999)\r\n # 训练过程中,更新完参数后,同步update shadow weights\r\n def train():\r\n optimizer.step()\r\n ema.update(model)\r\n # eval前,apply shadow weights;\r\n # eval之后(保存模型后),恢复原来模型的参数\r\n def evaluate():\r\n ema.apply_shadow(model)\r\n # evaluate\r\n ema.restore(modle)\r\n '''\r\n def __init__(self,model, decay):\r\n self.decay = decay\r\n self.shadow = {}\r\n self.backup = {}\r\n for name, param in model.named_parameters():\r\n if param.requires_grad:\r\n self.shadow[name] = param.data.clone()\r\n\r\n def update(self,model):\r\n for name, param in model.named_parameters():\r\n if param.requires_grad:\r\n assert name in self.shadow\r\n new_average = (1.0 - self.decay) * param.data + self.decay * self.shadow[name]\r\n self.shadow[name] = new_average.clone()\r\n\r\n def apply_shadow(self,model):\r\n for name, param in model.named_parameters():\r\n if param.requires_grad:\r\n assert name in self.shadow\r\n self.backup[name] = param.data\r\n param.data = self.shadow[name]\r\n\r\n def restore(self,model):\r\n for name, param in model.named_parameters():\r\n if param.requires_grad:\r\n assert name in self.backup\r\n param.data = self.backup[name]\r\n self.backup = {}\r\n","sub_path":"torchblocks/callback/ema.py","file_name":"ema.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"291721405","text":"#!/usr/bin/env python\n# Copyright 2011 Lockheed Martin\n#\n'''\nCreated on Nov 11, 2011\n\n@author: jpoyau\n'''\n\nimport os\nfrom os import path\n\n_PWD = path.abspath(path.dirname(__file__))\n\nimport sys\n#Add the config and lr module the sys path so that they can used.\nsys.path.append(path.abspath(path.join(_PWD,\"../../../../../config\")))\nsys.path.append(path.abspath(path.join(_PWD, \"../../../../\")))\n\n\nimport ConfigParser\nimport couchdb\nimport lrnodetemplate as nodeTemplate \nimport setup_utils, couch_utils\nfrom setup_node import publishNodeConnections\nimport uuid\nimport json\nimport urllib2\nimport urlparse\nfrom services.Resource_Data_Distribution import __ResourceDataDistributionServiceTemplate as DistributeServiceTemplate\nimport subprocess\nfrom lr.lib import helpers as h\nfrom datetime import datetime\nfrom time import sleep\nimport signal\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n_PYLONS_CONFIG = path.abspath(path.join(_PWD, \"../../../../development.ini.orig\"))\n_RESOURCE_DATA_FILTER_APP = path.abspath(path.join(_PWD, \"../../../../../couchdb/resource_data/apps/filtered-replication\"))\n_DISTRIBUTE_TEST_LOG = \"test_distribute.log\"\n\n\nclass Node(object):\n _CONFIG_DATABASE_NAMES=[\"community\", \"network\", \"node\", \"resourcedata\"]\n _RESOURCE_DATA_FILTER = \"\"\"\n function(doc , req)\n {\n if (doc.doc_type == \"resource_data\")\n {\n return true;\n }\n return false;\n }\n \"\"\"\n def __init__(self, nodeConfig, nodeName, communityId=None, networkId=None):\n self._nodeConfig = nodeConfig\n self._nodeName = nodeName\n self._pylonsConfigPath = path.abspath(path.join(path.dirname(_PYLONS_CONFIG),\n self._nodeName+\"_config.ini\"))\n self._setupPylonsConfig()\n self._setupDescriptions()\n self._setupNode()\n self._setupDistributeService()\n self.setNodeInfo(nodeName)\n if communityId is not None:\n self.setCommunityInfo(community)\n if networkId is not None:\n self.setNetworkInfo(networkId)\n self.removeTestLog()\n\n def _getNodeDatabaseList(self):\n return [self._nodeConfig.get(\"couch_info\", db) for db in self._CONFIG_DATABASE_NAMES]\n\n def _getNodeUrl(self):\n return self._nodeConfig.get(\"node_config\", \"node_url\")\n\n def _setupDescriptions(self):\n # Set the node, network and community\n self._communityDescription = dict(nodeTemplate.community_description)\n self._networkDescription = dict(nodeTemplate.network_description)\n self._nodeDescription = dict (nodeTemplate.node_description) \n self._nodeFilterDescription = dict(nodeTemplate.filter_description)\n \n def _setupResourceData(self):\n #Push the filter design document for the ressource_data.\n setup_utils.CreateDB(self._server, \n dblist=[self._nodeConfig.get(\"couch_info\", \"resourcedata\")], \n deleteDB=True)\n\n couch_utils.pushCouchApp(_RESOURCE_DATA_FILTER_APP, \n urlparse.urljoin(self._nodeConfig.get(\"couch_info\", \"server\"),\n self._nodeConfig.get(\"couch_info\", \"resourcedata\")))\n \n def removeTestLog(self):\n try:\n os.remove(path.abspath(path.join(self._pylonsConfig, _DISTRIBUTE_TEST_LOG)))\n except:\n pass\n\n def _setupNode(self):\n #create the couch db databases\n self._server = couchdb.Server(url=self._nodeConfig.get(\"couch_info\", \"server\"))\n setup_utils.CreateDB(self._server, dblist=self._getNodeDatabaseList(), deleteDB=True)\n policy = dict(nodeTemplate.network_policy_description)\n setup_utils.PublishDoc(self._server, \n self._nodeConfig.get(\"couch_info\", \"network\"), \n 'network_policy_description', policy)\n 
self._setupResourceData()\n \n \n def _setupDistributeService(self):\n custom_opts = {}\n custom_opts[\"node_endpoint\"] = self._getNodeUrl()\n custom_opts[\"service_id\"] = uuid.uuid4().hex\n custom_opts[\"active\"] = True\n \n must = DistributeServiceTemplate()\n config_doc = must.render(**custom_opts)\n\n doc = json.loads(config_doc)\n setup_utils.PublishDoc(self._server, self._nodeConfig.get(\"couch_info\", \"node\") ,\n doc[\"service_type\"]+\":Resource Data Distribution service\", doc)\n\n def _setupPylonsConfig(self):\n #Read the original configuration and update with the test node data.\n pylonsConfig = ConfigParser.ConfigParser()\n pylonsConfig.read(_PYLONS_CONFIG)\n\n #Set the couchdb database info\n for database in self._CONFIG_DATABASE_NAMES:\n pylonsConfig.set(\"app:main\", \"couchdb.db.{0}\".format(database),\n self._nodeConfig.get(\"couch_info\" , database))\n\n #Set the port number and url.\n for option in self._nodeConfig.options(\"pylons_server\"):\n pylonsConfig.set(\"server:main\", option, self._nodeConfig.get(\"pylons_server\", option))\n \n #Add the distribute_sink_url\n pylonsConfig.set(\"app:main\", \"distribute_sink_url\", \n urlparse.urljoin(self._nodeConfig.get(\"couch_info\", \"server\"),\n self._nodeConfig.get(\"couch_info\", \"resourcedata\")))\n #change the logging level to the highest level to avoid spamming log.\n #pylonsConfig.set(\"logger_lr\", \"level\", \"CRITICAL\") \n \n configFile = open(self._pylonsConfigPath, 'w')\n pylonsConfig.write(configFile)\n configFile.close()\n \n \n \n \n def setCommunityInfo(self, community, isSocialCommunity=True):\n self._communityDescription[\"community_id\"]= community\n self._communityDescription[\"community_name\"] = community\n self._communityDescription[\"community_description\"] = community\n self._communityDescription[\"social_community\"] = isSocialCommunity\n \n self._networkDescription[\"community_id\"] = community\n self._nodeDescription[\"community_id\"] = community\n \n setup_utils.PublishDoc(self._server, self._nodeConfig.get(\"couch_info\", \"node\"), \n self._nodeDescription[\"doc_type\"] , \n self._nodeDescription)\n \n setup_utils.PublishDoc(self._server, self._nodeConfig.get(\"couch_info\", \"network\"),\n self._networkDescription[\"doc_type\"], \n self._networkDescription)\n \n setup_utils.PublishDoc(self._server, self._nodeConfig.get(\"couch_info\", \"community\"),\n self._communityDescription[\"doc_type\"], \n self._communityDescription)\n\n def setNetworkInfo(self, network):\n self._networkDescription[\"network_name\"] = network\n self._networkDescription[\"network_description\"] = network\n self._networkDescription[\"network_id\"] = network\n self._nodeDescription[\"network_id\"] = network\n \n setup_utils.PublishDoc(self._server, self._nodeConfig.get(\"couch_info\", \"node\"), \n self._nodeDescription[\"doc_type\"] , \n self._nodeDescription)\n \n setup_utils.PublishDoc(self._server, self._nodeConfig.get(\"couch_info\", \"network\"),\n self._networkDescription[\"doc_type\"], \n self._networkDescription)\n \n def setNodeInfo(self, nodeName=None, isGateway=False, isActive=True):\n self._nodeDescription[\"node_id\"] = uuid.uuid4().hex\n if nodeName is not None:\n self._nodeDescription[\"node_name\"] = nodeName\n self._nodeDescription[\"node_description\"] = nodeName\n self._nodeDescription[\"gateway_node\"] = isGateway\n self._nodeDescription[\"active\"]= isActive\n self._nodeDescription[\"node_admin_identity\"] = \"testNode@admin.distribute\"\n \n setup_utils.PublishDoc(self._server, 
self._nodeConfig.get(\"couch_info\", \"node\"), \n self._nodeDescription[\"doc_type\"] , \n self._nodeDescription)\n \n def setFilterInfo(self, filter, include_exclude=True, custom_filter=False):\n self._nodeFilterDescription[\"filter_name\"] = self._nodeName + \"_filter\"\n self._nodeFilterDescription[\"include_exclude\"] = include_exclude\n self._nodeFilterDescription[\"custom_filter\"] = custom_filter\n self._nodeFilterDescription[\"filter\"]=filter\n \n setup_utils.PublishDoc(self._server, self._nodeConfig.get(\"couch_info\", \"node\"), \n self._nodeFilterDescription[\"filter_name\"] , \n self._nodeFilterDescription)\n \n\n def publishResourceData(self, docs):\n resourceDatabase = self._server[self._nodeConfig.get(\"couch_info\", \"resourcedata\")]\n for d in docs:\n doc = {}\n doc.update(d)\n #delete any previous revision number for the docs\n if '_rev' in doc:\n del doc['_rev']\n \n doc['doc_ID'] = uuid.uuid4().hex\n now = datetime.utcnow().isoformat()+\"Z\"\n doc['node_timestamp'] = now\n doc['create_timestamp'] = now\n doc['update_timestamp'] = now\n resourceDatabase[doc['doc_ID']] = doc\n \n def addConnectionTo(self, destinationUrl, gateway_connection=False):\n connection = dict(nodeTemplate.connection_description)\n connection['connection_id'] = uuid.uuid4().hex\n connection['source_node_url']=self._getNodeUrl()\n connection['gateway_connection'] = gateway_connection\n connection['destination_node_url'] = destinationUrl\n setup_utils.PublishDoc(self._server, self._nodeConfig.get(\"couch_info\", \"node\"), \n \"{0}_to_{1}_connection\".format(self._nodeName, destinationUrl),\n connection)\n \n \n def distribute(self):\n if hasattr(self, '_pylonsProcess'):\n data = json.dumps({\"dist\":\"dist\"})\n request = urllib2.Request(urlparse.urljoin(self._getNodeUrl(), \"distribute\"), \n data,\n {'Content-Type':'application/json; charset=utf-8'})\n response = urllib2.urlopen(request) \n\n def getResourceDataDocs(self, filter_description=None):\n \n db = self._server[self._nodeConfig.get(\"couch_info\", \"resourcedata\")]\n \n # For the source node, get all the resource_data documents using the filter\n # that was used to distribute the documents to the destination node.\n options = { \"filter\": \"filtered-replication/change_feed_filter\",\n \"include_docs\":True,\n \"doc_type\":\"resource_data\"}\n if filter_description is not None:\n options[\"filter_description\"] = json.dumps(filter_description)\n return db.changes(**options)[\"results\"]\n \n def compareDistributedResources(self, destination, filter_description=None):\n \"\"\"This method treats this node as the source node.\n It compares its resource_data documents with the destination node's to\n verify that data was distributed. 
This comparison assumes that distribute/\n replication is done and that there is no other additions or deletions the\n nodes that are being compared\"\"\"\n\n sourceResults = self.getResourceDataDocs(filter_description)\n destinationResults = destination.getResourceDataDocs() \n \n #check the number of source document is the same at destination.\n #otherwise the nodes resource distribution failed somehow.\n if len(destinationResults) != len(sourceResults):\n return False\n \n #Sort the documents by doc id for easy comparison\n sourceDocs = sorted(sourceResults, key= lambda doc: doc[\"id\"])\n destinationDocs = sorted(destinationResults, key= lambda doc: doc[\"id\"])\n \n # compare documents by documents and check that the destionation time is\n # greater than the source node time.\n for i in range(len(sourceDocs)):\n sourceDoc = {}\n sourceDoc.update(sourceDocs[i][\"doc\"])\n destDoc ={}\n destDoc.update(destinationDocs[i][\"doc\"])\n if (h.convertToISO8601UTC(destDoc[\"node_timestamp\"]) <= h.convertToISO8601UTC(sourceDoc[\"node_timestamp\"])):\n log.debug(\"{0} and {1} error\".format(sourceDoc['doc_ID'], destDoc['doc_ID']))\n return False\n #remove the node_timestamp and _rev then compare the docs\n del sourceDoc[\"node_timestamp\"]\n del destDoc[\"node_timestamp\"]\n del destDoc[\"_rev\"]\n del sourceDoc[\"_rev\"]\n if sourceDoc != destDoc:\n log.debug(\"{0} and {1} error\".format(sourceDoc['doc_ID'], destDoc['doc_ID']))\n return False\n return True\n \n def stop(self):\n if hasattr(self, '_pylonsProcess'):\n os.killpg(self._pylonsProcess.pid, signal.SIGTERM)\n \n def start(self):\n command = '(cd {0}; paster serve {1} --log-file {2}.log)'.format(\n path.abspath(path.dirname(self._pylonsConfigPath)),\n self._pylonsConfigPath, _DISTRIBUTE_TEST_LOG) \n #Create a process group name as so that the shell and all its process\n # are terminated when stop is called.\n self._pylonsProcess = subprocess.Popen(command, shell=True, \n preexec_fn=os.setsid)\n \n def resetResourceData(self):\n del self._server[ self._nodeConfig.get(\"couch_info\", \"resourcedata\")]\n self._setupResourceData()\n \n \n def tearDown(self):\n self.stop()\n #Delete the generated pylons configuration files\n os.remove(self._pylonsConfigPath)\n #Delete the generated database.\n for database in self._getNodeDatabaseList():\n del self._server[database]\n\n\n","sub_path":"LR/lr/tests/functional/distribute/lr_node.py","file_name":"lr_node.py","file_ext":"py","file_size_in_byte":14756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"166599065","text":"\nimport threading\n\nclass UFuture(object):\n\n\t# States for Future.\n\tPENDING = 0\n\tCOMPLETED = 1\n\tCANCELLED = 2\n\n\tdef __init__(self, ash = None):\n\t\tself._lock_ = threading.Lock()\n\t\tself._cv_ = threading.Condition(self._lock_)\n\t\tself._state_ = UFuture.PENDING\n\t\tself._v_ = None\n\t\tself._ash_ = ash\n\n\tdef set(self, v):\n\t\twith self._lock_:\n\t\t\tif self._state_ <= UFuture.COMPLETED:\n\t\t\t\tself._v_ = v\n\t\t\t\tself._state_ = UFuture.COMPLETED\n\t\t\tself._cv_.notify_all()\n\n\tdef cancel(self, mayInterruptIfRunning = True):\n\t\tcanceled = False\n\t\twith self._lock_:\n\t\t\twhile True:\n\t\t\t\tif self._state_ >= UFuture.COMPLETED:\n\t\t\t\t\tbreak\n\t\t\t\tif mayInterruptIfRunning and self._ash_:\n\t\t\t\t\tcs = self._ash_.AttachedClientSocket\n\t\t\t\t\tif cs:\n\t\t\t\t\t\tcs.Cancel()\n\t\t\t\tself._state_ = UFuture.CANCELLED\n\t\t\t\tcanceled = True\n\t\t\t\tself._cv_.notify_all()\n\t\t\t\tbreak;\n\t\treturn canceled\n\n\t@property\n\tdef canceled(self):\n\t\twith self._lock_:\n\t\t\treturn self._state_ >= UFuture.CANCELLED\n\n\t@canceled.setter\n\tdef canceled(self, v):\n\t\tif not v:\n\t\t\treturn\n\t\twith self._lock_:\n\t\t\tif self._state_ < UFuture.COMPLETED:\n\t\t\t\tself._state_ = UFuture.CANCELLED\n\t\t\tself._cv_.notify_all()\n\n\t@property\n\tdef state(self):\n\t\twith self._lock_:\n\t\t\treturn self._state_\n\n\t@property\n\tdef done(self):\n\t\twith self._lock_:\n\t\t\treturn self._state_ >= UFuture.COMPLETED\n\n\tdef get(self, timeout):\n\t\twith self._lock_:\n\t\t\tif self._state_ == UFuture.PENDING:\n\t\t\t\tself._cv_.wait(timeout)\n\t\t\treturn self._v_\n","sub_path":"bin/spa/clientside/ufuture.py","file_name":"ufuture.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"274740399","text":"import platform\nimport os\nimport socket\nimport fcntl\nimport struct\n\nfrom apps.server.modules.libs.mod_interfaceRunCmd import mod_interfaceRunCmd\n\n\nclass mod_info(mod_interfaceRunCmd):\n\n cmd_short = \"in\"\n cmd_long = \"info\"\n cmd_desc = \"Info module\"\n\n cmd_sw_vers = \"sw_vers\"\n cmd_uname_long = \"uname -a\"\n\n def setup_mod(self):\n print(f'Module Setup (mod_info) called successfully!')\n\n def run_mod(self, cmd=\"\", param=\"\"):\n # hostname = socket.gethostname()\n # local_ip = socket.gethostbyname(hostname)\n # self.get_hostName()\n sRet = f'#========================================================================#\\n'\n sRet += f'| Info for server - IP: {self.get_hostIP()}\\n'\n if param == \"\" or param == \"-m\" or param == \"-a\":\n sRet += f\"| Host: \" + socket.gethostname() + \"\\n\"\n if param == \"\" or param == \"-m\" or param == \"-a\":\n sRet += f\"| System: \" + self.get_model()\n if param == \"\" or param == \"-p\" or param == \"-a\":\n sRet += f\"| Platform: \" + self.get_platform() + \"\\n\"\n if param == \"\" or param == \"-v\" or param == \"-a\":\n sRet += f\"| macOS version: \" + self.get_macVer() + \"\\n\"\n if param == \"\" or param == \"-vv\" or param == \"-a\":\n arrVers = self.get_vers()\n strVers = arrVers.replace(\"\\n\", \"\\n| \")\n sRet += f\"| macOS v. ext.: \\n| \" + strVers + \"\\n\"\n if param == \"\" or param == \"-u\" or param == \"-a\":\n sRet += f\"| Current user: {self.get_user()}\\n\"\n if param == \"\" or param == \"-s\" or param == \"-a\":\n sRet += f\"| {self.get_sip()}\"\n if param == \"-s\":\n sRet += \"\\n\"\n if param == \"\" or param == \"-w\" or param == \"-wl\" or param == \"-a\":\n sRet += f\"| WiFi: \\n\" + self.get_wifi(True)\n if param == \"-ws\":\n sRet += f\"| WiFi: \\n\" + self.get_wifi(False)\n if param == \"\" or param == \"-b\" or param == \"-a\":\n sRet += (\"|\" if param == \"-b\" else \"\") + f\" Battery: \" + self.get_battery()\n if param != \"-v\" and param != \"-m\" and param != \"-p\" and param != \"-u\":\n sRet = sRet[:-1]\n sRet += f'#========================================================================#\\n'\n return sRet\n\n def mod_helptxt(self):\n help_txt = {\n 'desc': self.pritify4log(\"The 'Info' module returns information about\\n\"\n \"the server system like macOS version, pc model,\\n\"\n \"wifi info or the battery condition.\"),\n 'cmd': f'{self.getCmdVariants4Help()} [-a|-m|-p|-v|-u|-s|-w|-wl|-ws|-b]',\n 'ext': self.pritify4log(\n '-a\\tAll available information - the default (like no param)\\n'\n '-m\\tSystem model \\n'\n '-p\\tPlatform\\n'\n '-v\\tOnly macOS version\\n'\n '-u\\tOnly current username (and is root?)\\n'\n '-s\\tIs SIP enabled\\n'\n '-w|-wl\\tAll Wifi-Info\\n'\n '-ws\\tBasic Wifi-Info only\\n'\n '-b\\tInformation about the battery')\n }\n return help_txt\n\n def get_hostIP(self):\n myIP = \"127.0.0.1\"\n addrInfo = socket.getaddrinfo(socket.gethostname(), None, family=socket.AF_INET, proto=socket.IPPROTO_TCP)\n addrInfoNG = addrInfo\n\n for hn in addrInfo:\n if hn[4][0] != myIP:\n myIP = hn[4][0]\n break\n\n return myIP\n\n def get_macVer(self):\n return str(platform.mac_ver()[0])\n\n def get_sip(self):\n return self.run_command(\"csrutil status\")\n\n def get_vers(self):\n return self.run_command(self.cmd_sw_vers).strip()\n\n def get_platform(self):\n return self.run_command(self.cmd_uname_long).strip()\n\n def get_user(self):\n current_user = self.run_command(\"whoami\").strip()\n isRoot = False\n if current_user == \"root\":\n isRoot = 
True\n return f'{current_user} (isRoot: {isRoot})'\n\n def get_model(self):\n return self.run_command(\"sysctl -n hw.model\")#.decode('utf-8')\n\n def get_wifi(self, allinfo = True):\n if allinfo:\n command = \"/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport -I\" # | grep -w SSID\"\n else:\n command = \"/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport -I | grep -w SSID\"\n sRet = self.run_command(command).replace(\"\\n\", \"\\n|\")\n return \"| \" + sRet.strip() #self.run_command(command)#.decode('utf-8')#.replace(\"SSID: \", \"\").strip()\n\n def get_battery(self):\n sRet = self.run_command(\"pmset -g batt\").replace(\"\\n\", \"\\n| \").replace(\"\\t\", \"\\n| \")\n return sRet[:-1] #.decode(\"utf-8\")# | egrep \\\"([0-9]+\\\\%).*\\\" -o | cut -f1 -d\\';\\'\")","sub_path":"apps/server/modules/mod_info.py","file_name":"mod_info.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"437497782","text":"import os\nimport cv2\nimport dlib\nimport numpy as np\nimport pandas as pd\nimport imutils\nimport math\n\n# image size for prediction\nimg_width = 200\nimg_height = 200\n# scale factor for preprocessing\npicSize = 400\nrotation = True\n\n# face detector\npathDet = 'Emotion_Dog/dogHeadDetector.dat'\npathCat = \"Emotion_Cat/haarcascade_frontalcatface.xml\"\n\nfaceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + pathCat)\ndetector = dlib.cnn_face_detection_model_v1(pathDet)\n\n\nclass No_Preprocessing:\n\n def __init__(self, img_width, img_height):\n self.img_width = img_width\n self.img_height = img_height\n\n def extract_and_prepare_pixels(self, pixels):\n \"\"\"\n Takes in a string (pixels) that has space separated integer values and returns an array which includes the\n pixels for all img.\n \"\"\"\n pixels_as_list = [item[0] for item in pixels.values.tolist()]\n np_image_array = []\n for index, item in enumerate(pixels_as_list):\n # split space separated ints\n pixel_data = item.split()\n img_size_row = img_size_col = 256\n if len(pixel_data) % 490 == 0:\n img_size_row = 490\n img_size_col = 640\n elif len(pixel_data) == 10000:\n img_size_row = img_size_col = 100\n\n data = np.zeros((img_size_row, img_size_col), dtype=np.uint8)\n\n # Loop through rows\n for i in range(0, img_size_row):\n try:\n # (0 = 0), (1 = 47), (2 = 94), ...\n pixel_index = i * img_size_col\n # (0 = [0:47]), (1 = [47: 94]), (2 = [94, 141]), ...\n data[i] = pixel_data[pixel_index:pixel_index + img_size_col]\n except:\n pass\n\n np_image_array.append(np.array(data))\n np_image_array = np.array(np_image_array)\n return np_image_array\n\n def predict_emotion(self, model, img):\n \"\"\"\n Use a trained model to predict emotional state\n \"\"\"\n\n emotion = 'None'\n\n prediction = model.predict(img)[0]\n print(\"prediction is: \", prediction )\n # ->\n prediction = [round(x * 100, 2) for x in prediction]\n print(prediction)\n prediction_ = np.argmax(prediction)\n\n if prediction_ == 0:\n emotion = 'Angry'\n elif prediction_ == 1:\n emotion = 'Happy'\n elif prediction_ == 2:\n emotion = 'Neutral'\n elif prediction_ == 3:\n emotion = 'Sad'\n elif prediction_ == 4:\n emotion = 'Scared'\n print(\"the most likely emotion is :\", emotion)\n d = {'emotion': ['Angry', 'Happy', 'Neutral', 'Sad', 'Scared'], 'prob': prediction}\n df = pd.DataFrame(d, columns=['emotion', 'prob']).sort_values(by=['prob'], ascending=False)\n df.prob = [str(x) + '%' for x in df.prob]\n return df\n\n\ndef dog_preprocess(path):\n # read image from path\n orig = cv2.imread(path)\n dirpath, filedir = os.path.split(path)\n filename, extend = os.path.splitext(filedir)\n\n if not orig is None:\n # resize\n height, width, channels = orig.shape # read size\n ratio = picSize / height\n image = cv2.resize(orig, None, fx=ratio, fy=ratio)\n\n # color gray\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # detect face(s)\n dets = detector(gray, upsample_num_times=1)\n imageList = [] # for return\n for i, d in enumerate(dets):\n # save coordinates\n x1 = max(int(d.rect.left() / ratio), 1)\n y1 = max(int(d.rect.top() / ratio), 1)\n x2 = min(int(d.rect.right() / ratio), width - 1)\n y2 = min(int(d.rect.bottom() / ratio), height - 1)\n\n imageList.append(orig)\n print(x1, x2, y1, y2)\n\n # prepare for prediction\n little = cv2.resize(gray[y1:y2, x1:x2], (img_width, img_height)) # crop and resize\n pixel = cv2.cvtColor(little, cv2.COLOR_BGR2GRAY)\n to_csv_data = ' '.join(map(str, pixel.flatten()))\n x = np.expand_dims(pixel, 
axis=0)\n x = x.reshape((-1, 100, 100, 1))\n imageList.append(x)\n return imageList, to_csv_data # order: marked picture, input for classifier\n return None\n\n\ndef cat_preprocess(path):\n orig = cv2.imread(path)\n dirpath, filedir = os.path.split(path)\n filename, extend = os.path.splitext(filedir)\n\n if not orig is None:\n # resize\n height, width, channels = orig.shape # read size\n ratio = picSize / height\n image = cv2.resize(orig, None, fx=ratio, fy=ratio)\n\n # color gray\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # detect face(s)\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.02,\n minNeighbors=3,\n minSize=(50, 50),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n # t = datetime.datetime.now()\n for (i, (x, y, w, h)) in enumerate(faces):\n # prepare for prediction\n little = cv2.resize((image[y:y + h, x:x + w]), (img_width, img_height))\n pi = cv2.cvtColor(little, cv2.COLOR_BGR2GRAY)\n to_csv_data = ' '.join(map(str, pi.flatten()))\n return to_csv_data\n return None\n\n\ndef renameFile(path):\n print('come into path:' + path)\n fileList = os.listdir(path)\n for file in fileList:\n oldfilename = path + os.sep + file\n newfilename = path + os.sep + file[:-4] + '.jpg'\n os.rename(oldfilename, newfilename)\n print(oldfilename + ' -> ' + newfilename)\n print(\"done\")\n","sub_path":"Web/FlaskServer/scripts/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"82877617","text":"\nfrom pylab import *\nimport numpy as np\nimport random\nimport matplotlib.cbook as cbook\nimport random\nimport time\nfrom scipy.misc import imread\nfrom scipy.misc import imsave\nfrom scipy.misc import imresize\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport os\n\n#os.chdir('E:/University Material/2014-2015/CSC320/Assignment/A3/')\n\n# Principal Component Analysis\ndef pca(X):\n\n # get dimensions\n num_data,dim = X.shape\n # center data\n mean_X = X.mean(axis=0)\n X = X - mean_X\n\n if dim>num_data:\n # PCA - compact trick used\n M = dot(X,X.T) # covariance matrix\n e,EV = linalg.eigh(M) # eigenvalues and eigenvectors\n tmp = dot(X.T,EV).T # this is the compact trick\n V = tmp[::-1] # reverse since last eigenvectors are the ones we want\n S = sqrt(e)[::-1] # reverse since eigenvalues are in increasing order\n for i in range(V.shape[1]):\n V[:,i] /= S\n else:\n # PCA - SVD used\n U,S,V = linalg.svd(X)\n V = V[:num_data] # only makes sense to return the first num_data\n \n # return the projection matrix, the variance and the mean\n return V,S,mean_X\n\ndef display_save_25_comps(V, im_shape):\n '''Display 25 components in V'''\n figure()\n for i in range(25):\n plt.subplot(5, 5, i+1)\n plt.axis('off')\n gray()\n imshow(V[i,:].reshape(im_shape))\n savefig('top_25_comps.jpg') \n\n \n# Return an nested array of requested size with no duplicate\ndef get_set(num, img_array, index_list):\n \n count = 0\n set = []\n \n while(count != num):\n ranNum = random.randint(0, len(img_array) - 1)\n if (ranNum not in index_list):\n set.append(img_array[ranNum])\n index_list.append(ranNum)\n count += 1\n \n return set\n \n# Return all the arrays for this given actor\ndef get_actor_set(img_dir, actor):\n im_files = sorted([img_dir + filename for filename in os.listdir(img_dir) if actor in filename])\n im_shape = array(imread(im_files[0])).shape[:2] # open one image to get the size \n im_matrix = array([imread(im_file).flatten() for im_file in im_files])\n im_matrix = array([im_matrix[i,:]/(norm(im_matrix[i,:])+0.0001) for i in range(im_matrix.shape[0])])\n gray()\n return (im_matrix, im_shape)\n\n\n# Find the closest match in the training set\n# Return the index in the training sets\ndef find_closest_match(V, image, training_sets):\n \n min_index = 0\n min_ssd = float(\"inf\") \n\n # Project the input image on to the eigenspace\n x_pca = [np.dot(V[i].T , (image - mean_im)) for i in range(len(V))]\n\n # Loop through 800 training sets and find the closest image\n for i in range(len(training_sets)):\n temp = [np.dot(V[k].T , (training_sets[i] - mean_im)) for k in range(len(V))]\n diff = sum([(a - b)**2 for a, b in zip(temp , x_pca)])\n if min_ssd > diff:\n min_ssd = diff\n min_index = i\n \n return min_index\n\n# Find the label for every image in the test set\ndef find_label(test_set, training_sets, V, actor):\n\n label = []\n \n for i in range(len(test_set)):\n for img in test_set[i]:\n index = find_closest_match(V, img, training_sets)\n label.append(actor[int(index / 100)])\n \n return np.array(label)\n\n# Divide the dataset into 3 sets\ndef part2(actors, file_dir):\n \n # For each actor\n test = []\n train = []\n validation = []\n \n # For all actors\n test_set = []\n training_set = []\n validation_set = []\n \n actor_set = []\n i = 0\n \n # Divide the dataset into 3 array lists\n # With no duplicate\n for a in actors:\n index_list = []\n temp = (get_actor_set(file_dir, a))\n actor_set.append(temp[0])\n \n train.append(get_set(100, actor_set[i], 
index_list))\n validation.append(get_set(10, actor_set[i], index_list))\n test.append(get_set(10, actor_set[i], index_list))\n \n training_set += train[i]\n validation_set += validation[i]\n test_set += test[i]\n i += 1\n\n return [[train,validation, test],[training_set, validation_set, test_set], [temp[1]]]\n \n# Part 3 and 4\n# Compute face recognition by actor name or gender\ndef part3and4(eigenFaces, training_sets, validation_set, test_set, actor_name_or_gender):\n correct_label = [[a]*10 for a in actor_name_or_gender]\n correct_label = np.array(correct_label).flatten()\n k_lst = [2, 5, 10, 20, 50, 80, 100, 150, 200]\n best_k = k_lst[0]\n max_correct_label = 0\n \n # Tuning, finding the best k using validation set\n for k in k_lst:\n # Check the label for every image in the validation sets\n result_label = find_label(validation_set,training_sets, eigenFaces[:k], actor_name_or_gender)\n num_correct_label = (result_label == correct_label).sum()\n accuracy = round(num_correct_label / float(80), 2)\n \n if max_correct_label <= num_correct_label:\n max_correct_label = num_correct_label \n best_k = k\n \n # The result for each k for the validation set \n print (\"k = \", k, \" best k = \", best_k)\n print (\"Number of correct labels = \", num_correct_label)\n print (\"Total Number of validation cases = 80\")\n print (\"Accuracy = \", accuracy)\n print (\"\")\n \n # The result of facial recognition on the test set using the best k\n result_label = find_label(test_set,training_sets, eigenFaces[:best_k], actor_name_or_gender)\n num_correct_label = (result_label == correct_label).sum()\n accuracy = round(num_correct_label / float(80), 2)\n\n print (\"Final\")\n print (\"k = \", k, \" best k = \", best_k)\n print (\"Number of correct labels = \", num_correct_label)\n print (\"Total Number of test cases = 80\")\n print (\"Accuracy = \", accuracy)\n print (\"\") \n \n\n# Change this to your directory:\n#files_dir = 'E:/University Material/2014-2015/CSC320/Assignment/A3/cropped/'\nfiles_dir = './cropped/'\nall_actor = ['eckhart','sandler','brody','anders','benson','applegate','agron','anderson']\nactor_gender = ['M', 'M', 'M', 'F', 'F', 'F', 'F', 'F']\n\n# Part2\n# Divided dataset\ndivied_sets = part2(all_actor, files_dir)\n\n# For each actor\ntrainSet = divied_sets[0][0]\nvalidateSet = divied_sets[0][1]\ntestSet = divied_sets[0][2]\n\n# For all actors\ntrainSets = divied_sets[1][0]\nvalidatSets = divied_sets[1][1]\ntestSets = divied_sets[1][2]\n\nshape = divied_sets[2][0]\n\n# Convert the list into an array\nim_matrix = np.array(trainSets)\nim_shape = shape\n\n# Normalize the array\nfor i in range(im_matrix.shape[0]):\n im_matrix[i] = im_matrix[i]/255.0\n \n# Find the variance, average/mean face of the given array\nV, S, mean_im = pca(im_matrix)\n\n# Display and save the top 25 eigenfaces\n#display_save_25_comps(V, im_shape)\n\n# Part 3\npart3and4(V, trainSets, validateSet, testSet, all_actor)\n\n# Part 4\npart3and4(V, trainSets, validateSet, testSet, actor_gender)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"2014-2015/CSC320/A3/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"556698769","text":"from hero import Hero\nfrom enemy import Enemy\nfrom random import choice, randint\nfrom human import Human\nfrom item import Weapon, Spell, Potion\nfrom treasures import ts, random_spell_with_tier\nfrom map_crawlers import queue_crawler\n\n\nDIFFICULTY_LEVELS_ATTACK = {\n 1: range(5, 10),\n 2: range(11, 15),\n 3: range(16, 20)\n}\n# for each difficulty determines some range\n# in which each enemy's default attack power will be\n\nDIFFICULTY_LEVELS_HEALTH = {\n 1: range(35, 50),\n 2: range(50, 65),\n 3: range(65, 90)\n}\n# for each difficulty determines some range\n# in which each enemy's starting health will be\n\n\nclass Dungeon:\n\n def __init__(self, map_file_name):\n self.hero = None\n self.__tile_objects = dict()\n self.__difficulty = 0\n if map_file_name == 'level1.txt':\n self.__difficulty = 1\n elif map_file_name == 'level2.txt':\n self.__difficulty = 2\n elif map_file_name == 'level3.txt':\n self.__difficulty = 3\n self.__treasures = ts\n self.map_file_name = map_file_name\n self.dungeon_map = self.__read_map_file(map_file_name)\n\n def __put_boss(self, x_pos, y_pos):\n health_ = choice(DIFFICULTY_LEVELS_HEALTH[self.__difficulty])\n damage_ = choice(DIFFICULTY_LEVELS_ATTACK[self.__difficulty])\n\n s_info = random_spell_with_tier(1)\n s = Spell(**s_info[1])\n e = Enemy(health=health_ * 1.2, mana=100, class_='Paladin', damage=damage_)\n e.learn(spell=s)\n e.set_position(x=x_pos, y=y_pos)\n\n if (x_pos, y_pos) in self.__tile_objects:\n self.__tile_objects[(x_pos, y_pos)].append(e)\n else:\n self.__tile_objects[(x_pos, y_pos)] = [e]\n\n return \".\"\n\n def __random_enemy(self):\n health_ = choice(DIFFICULTY_LEVELS_HEALTH[self.__difficulty])\n damage_ = choice(DIFFICULTY_LEVELS_ATTACK[self.__difficulty])\n\n return Enemy(health=health_, mana=100, class_='Paladin', damage=damage_)\n\n def __random_treasure(self):\n cur_treasure = self.__treasures\n\n while type(cur_treasure) is dict:\n dice_roll = randint(0, 99)\n for k, v in cur_treasure.items():\n if dice_roll in k:\n cur_treasure = v\n\n if cur_treasure[0] == \"weapon\":\n return Weapon(**cur_treasure[1])\n elif cur_treasure[0] == \"spell\":\n return Spell(**cur_treasure[1])\n else:\n return Potion(**cur_treasure[1])\n\n def __put_starting_point(self, x_pos, y_pos):\n if (x_pos, y_pos) in self.__tile_objects:\n self.__tile_objects[(x_pos, y_pos)].append(\"S\")\n else:\n self.__tile_objects[(x_pos, y_pos)] = [\"S\"]\n\n self.visible_tiles.add((x_pos, y_pos))\n\n return \".\"\n\n def __put_enemy(self, x_pos, y_pos):\n new_enemy = self.__random_enemy()\n\n new_enemy.set_position(x=x_pos, y=y_pos)\n\n if (x_pos, y_pos) in self.__tile_objects:\n self.__tile_objects[(x_pos, y_pos)].append(new_enemy)\n else:\n self.__tile_objects[(x_pos, y_pos)] = [new_enemy]\n\n return \".\"\n\n def __put_treasure(self, x_pos, y_pos):\n new_treasure = self.__random_treasure()\n if (x_pos, y_pos) in self.__tile_objects:\n self.__tile_objects[(x_pos, y_pos)].append(new_treasure)\n else:\n self.__tile_objects[(x_pos, y_pos)] = [new_treasure]\n\n return \".\"\n\n def __put_gateway(self, x_pos, y_pos):\n if (x_pos, y_pos) in self.__tile_objects:\n self.__tile_objects[(x_pos, y_pos)].append('G')\n else:\n self.__tile_objects[(x_pos, y_pos)] = ['G']\n\n return \".\"\n\n def __read_map_file(self, map_file_name):\n\n CHARMAP = {\n \"#\": lambda *args: \"#\", # function returning a 'wall'\n \".\": lambda *args: \".\", # function returning an 'empty space'\n \" \": lambda *args: \" \",\n \"S\": self.__put_starting_point, # function 
for creating a starting point for spawn/respawn\n \"E\": self.__put_enemy, # function for creating an enemy\n \"T\": self.__put_treasure, # function for creating treasure\n \"G\": self.__put_gateway,\n \"B\": self.__put_boss\n }\n\n self.visible_tiles = set()\n\n res = []\n from os import path\n import sys\n if not path.exists(path.join(sys.path[0], map_file_name)):\n raise ValueError(\"Map file does not exist!\")\n\n with open(map_file_name, \"r\") as f:\n for row_num, line in enumerate(f.readlines()):\n res.append([])\n for col_num, char in enumerate(line[:-1]):\n charmap_func = CHARMAP.get(char, lambda *args: \".\")\n\n res[row_num].append(\n charmap_func(col_num, row_num)\n )\n\n return res\n\n def update_visibles(self, x, y):\n vision = queue_crawler(self.dungeon_map, x, y, 5)\n for tile in vision:\n self.visible_tiles.add(tile)\n\n def __print_tile(self, x_pos, y_pos):\n tile_objects = self.__tile_objects.get((x_pos, y_pos), [])\n\n if(\n self.hero is not None and self.hero.get_position() ==\n (x_pos, y_pos)\n ):\n print('H', end='')\n elif tile_objects == []:\n print('.', end='')\n elif self.is_there_enemy((x_pos, y_pos)) is not False:\n e = self.is_there_enemy((x_pos, y_pos))\n if e.cur_spell is not None:\n print(\"B\", end='')\n else:\n print('E', end='')\n elif any([obj == 'G' for obj in tile_objects]):\n print('G', end='')\n else:\n print(\"T\", end='')\n\n def print_map(self):\n for row_num, tile_row in enumerate(self.dungeon_map):\n for col_num, tile in enumerate(self.dungeon_map[row_num]):\n if (col_num, row_num) in self.visible_tiles:\n if tile == \".\":\n self.__print_tile(col_num, row_num)\n else:\n print(tile, end='')\n else:\n print(\" \", end='')\n print()\n print()\n\n def spawn(self, hero):\n self.hero = hero\n for y, row in enumerate(self.dungeon_map):\n for x, cell in enumerate(row):\n if \"S\" in self.__tile_objects.get((x, y), []):\n self.__tile_objects[(x, y)].remove(\"S\")\n self.hero.set_position(x=x, y=y)\n self.update_visibles(x, y)\n return True\n\n return False\n\n def move_helper(self, human, delta_x, delta_y):\n if not isinstance(human, Human):\n raise ValueError(\"human must be of type Human\")\n old_x_pos, old_y_pos = human.get_position()\n\n new_x_pos = old_x_pos + delta_x\n new_y_pos = old_y_pos + delta_y\n\n if (\n new_x_pos in range(len(self.dungeon_map[0])) and\n new_y_pos in range(len(self.dungeon_map)) and\n (\n self.dungeon_map[new_y_pos][new_x_pos] == '.' 
or\n self.dungeon_map[new_y_pos][new_x_pos] == 'G'\n )\n ):\n # update human position\n if type(human) is Enemy:\n self.__tile_objects[(old_x_pos, old_y_pos)].remove(human)\n if (new_x_pos, new_y_pos) not in self.__tile_objects:\n self.__tile_objects[(new_x_pos, new_y_pos)] = []\n self.__tile_objects[(new_x_pos, new_y_pos)].append(human)\n\n if type(human) is Hero:\n self.update_visibles(new_x_pos, new_y_pos)\n\n human.set_position(x=new_x_pos, y=new_y_pos)\n return True\n\n return False\n\n def move_human(self, human=None, direction=None):\n possible_moves = {\n \"up\": (0, -1),\n \"down\": (0, 1),\n \"left\": (-1, 0),\n \"right\": (1, 0)\n }\n\n move = possible_moves.get(direction, False)\n\n if move:\n return self.move_helper(human, *move)\n\n return False\n\n def can_move(self, x, y):\n if(\n x in range(len(self.dungeon_map[0])) and\n y in range(len(self.dungeon_map)) and\n self.dungeon_map[y][x] == '.'\n ):\n return True\n else:\n return False\n\n def find_enemy(self, x, y, dir_filter, steps):\n\n # print(dir_filter, x, y, steps)\n diag_neightbours = {\n 0: [1, 3],\n 2: [1, 5],\n 4: [],\n 6: [3, 7],\n 8: [7, 5]\n }\n\n if steps == 0:\n return []\n\n # evalate direct moves\n for direct in range(1, 8, 2):\n\n is_move_valid = dir_filter[direct]\n y_delta = direct // 3 - 1\n x_delta = direct % 3 - 1\n\n if is_move_valid:\n dir_filter[direct] = self.can_move(x + x_delta, y + y_delta)\n\n # evaluate diagonal moves based on the direct ones\n for diag in range(0, 9, 2):\n\n is_move_valid = dir_filter[diag]\n y_delta = diag // 3 - 1\n x_delta = diag % 3 - 1\n\n direct_neightbours = [\n dir_filter[index] for index in diag_neightbours[diag]\n ]\n\n if is_move_valid:\n if any(direct_neightbours):\n dir_filter[diag] = self.can_move(x + x_delta, y + y_delta)\n else:\n dir_filter[diag] = False\n\n objects = self.__tile_objects.get((x, y), [])\n\n if any([type(obj) is Enemy for obj in objects]):\n return [(x, y)]\n\n possible_paths = {\"dummy\": []}\n\n for i, is_move_valid in enumerate(dir_filter):\n y_delta = i // 3 - 1\n x_delta = i % 3 - 1\n\n if is_move_valid:\n\n path_from_dir = self.find_enemy(\n x + x_delta,\n y + y_delta,\n dir_filter[:],\n steps - 1\n )\n\n dir_filter[i] = False\n\n if path_from_dir != []:\n destination = path_from_dir[0]\n if destination in possible_paths:\n if len(path_from_dir) < len(possible_paths[destination]):\n possible_paths[destination] = path_from_dir\n else:\n possible_paths[destination] = path_from_dir\n\n possible_diff_paths = possible_paths.values()\n\n longest_path = sorted(possible_diff_paths, key=lambda x: len(x))[-1]\n\n if longest_path != []:\n longest_path.append((x, y))\n return longest_path\n\n return []\n\n def cast_spell(self, caster):\n if not isinstance(caster, Human):\n raise TypeError()\n if not caster.can_cast():\n return True\n\n caster_x, caster_y = caster.get_position()\n walk_path = self.find_enemy(\n caster_x,\n caster_y,\n [True, True, True, True, False, True, True, True, True],\n (caster.cur_spell.cast_range + 1)\n )\n\n # print(walk_path) # PRINTING WALKPATH HEEEERE\n\n if walk_path != [] and walk_path is not None:\n objects = self.__tile_objects[walk_path[0]]\n for obj in objects:\n if type(obj) is Enemy:\n target = obj\n break\n\n return {\n \"hero\": caster,\n \"enemy\": target,\n \"walk_path\": walk_path,\n \"cur_dungeon\": self\n }\n\n else:\n return False\n\n def is_there_enemy(self, direction):\n objects = self.__tile_objects.get(direction, [])\n\n for obj in objects:\n if type(obj) is Enemy:\n return obj\n return False\n\n def 
delete_enemy(self, direction):\n        objects = self.__tile_objects.get(direction, [])\n\n        # iterate over a copy: removing from the list being iterated skips elements\n        for obj in list(objects):\n            if type(obj) is Enemy:\n                self.__tile_objects[direction].remove(obj)\n\n    def is_there_gateway(self, direction):\n        if \"G\" in self.__tile_objects.get(direction, []):\n            return 'G'\n        return False\n\n    def is_there_treasure(self, direction):\n        objects = self.__tile_objects.get(direction, [])\n\n        for obj in objects:\n            if type(obj) is Weapon or type(obj) is Spell or type(obj) is Potion:\n                return obj\n        return False\n\n    def delete_treasure(self, direction):\n        objects = self.__tile_objects.get(direction, [])\n\n        # iterate over a copy: removing from the list being iterated skips elements\n        for obj in list(objects):\n            if type(obj) is Weapon or type(obj) is Spell or type(obj) is Potion:\n                self.__tile_objects[direction].remove(obj)\n","sub_path":"dungeon.py","file_name":"dungeon.py","file_ext":"py","file_size_in_byte":12829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
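The recursive find_enemy above copies its direction filter on every call, so the work grows steeply with cast range. A breadth-first search over the same grid visits each tile at most once and still returns the path with the enemy first and the caster last, like find_enemy does. A minimal sketch, assuming the Dungeon conventions above (can_move, is_there_enemy, (col, row) tuples) and orthogonal moves only:

from collections import deque

def bfs_find_enemy(dungeon, x, y, max_steps):
    # Breadth-first search: tiles are expanded in rings of equal distance,
    # so the first enemy dequeued is the nearest reachable one.
    start = (x, y)
    came_from = {start: None}  # maps each visited tile to its predecessor
    frontier = deque([(start, 0)])
    while frontier:
        (cx, cy), dist = frontier.popleft()
        if dungeon.is_there_enemy((cx, cy)):
            # walk the predecessor chain back to recover the path
            path, pos = [], (cx, cy)
            while pos is not None:
                path.append(pos)
                pos = came_from[pos]
            return path  # enemy first, caster last, like find_enemy()
        if dist == max_steps:
            continue
        for dx, dy in ((0, -1), (0, 1), (-1, 0), (1, 0)):
            nxt = (cx + dx, cy + dy)
            if nxt not in came_from and dungeon.can_move(*nxt):
                came_from[nxt] = (cx, cy)
                frontier.append((nxt, dist + 1))
    return []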
+{"seq_id":"397232694","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom waveflow.data import LJspeechDataset, collate_fn, collate_fn_synthesize\nfrom waveflow.model import WaveFlow\nimport numpy as np\nimport librosa\nimport os\nimport argparse\nimport time\n\ntorch.backends.cudnn.benchmark = False\nnp.set_printoptions(precision=4)\ntorch.manual_seed(1111)\n\nparser = argparse.ArgumentParser(description='Synthesize from WaveFlow trained on LJSpeech',\n                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--data_path', type=str, default='./ljspeech_data',\n                    help='Dataset Path')\nparser.add_argument('--output_path', type=str, default='./output')\n\nparser.add_argument('--model_name', type=str, default='waveflow', help='Model Name')\nparser.add_argument('--batch_size', '-b', type=int, default=8, help='Batch size.')\nparser.add_argument('--load_step', type=int, default=0, help='Load Step')\n\nparser.add_argument('--cin_channels', type=int, default=80, help='Cin Channels')\nparser.add_argument('--res_channels', type=int, default=64, help='residual channels')\nparser.add_argument('--n_height', type=int, default=64,\n                    help='Number of height for 2D matrix conversion of 1D waveform. notated as h.')\nparser.add_argument('--n_layer', type=int, default=8, help='Number of layers')\nparser.add_argument('--n_flow', type=int, default=8, help='Number of flows')\nparser.add_argument('--n_layer_per_cycle', type=int, default=5,\n                    help=\"number of layers inside a single flow for height dilation cycle.\"\n                         \"ex: 3 with --n_layer=8 equals [1 2 4 1 2 4 1 2]\"\n                         \"ex2: 5 with --n_layer=8 equals [1 2 4 8 16 1 2 4]\")\n\nparser.add_argument('--num_workers', type=int, default=0, help='Number of workers')\nparser.add_argument('--num_gpu', type=int, default=1, help='Number of GPUs to use. 
>1 uses DataParallel')\n\nparser.add_argument('--num_samples', type=int, default=10, help='# of audio samples')\nparser.add_argument('--temp', type=float, default=1.0, help='Temperature')\n\n\nargs = parser.parse_args()\n\n# auto-complete additional args for output subfolders\nargs.sample_path = os.path.join(args.output_path, 'samples')\nargs.param_path = os.path.join(args.output_path, 'params')\nargs.log_path = os.path.join(args.output_path, 'log')\nargs.loss_path = os.path.join(args.output_path, 'loss')\n\n# Init logger\nif not os.path.isdir(args.log_path):\n os.makedirs(args.log_path)\nif not os.path.isdir(os.path.join(args.log_path, args.model_name)):\n os.makedirs(os.path.join(args.log_path, args.model_name))\n\n# Checkpoint dir\nif not os.path.isdir(args.param_path):\n os.makedirs(args.param_path)\nif not os.path.isdir(args.loss_path):\n os.makedirs(args.loss_path)\nif not os.path.isdir(args.sample_path):\n os.makedirs(args.sample_path)\nif not os.path.isdir(os.path.join(args.sample_path, args.model_name)):\n os.makedirs(os.path.join(args.sample_path, args.model_name))\nif not os.path.isdir(os.path.join(args.param_path, args.model_name)):\n os.makedirs(os.path.join(args.param_path, args.model_name))\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n# LOAD DATASETS\ntrain_dataset = LJspeechDataset(args.data_path, True, 0.1)\ntest_dataset = LJspeechDataset(args.data_path, False, 0.1)\ntrain_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=collate_fn,\n num_workers=args.num_workers, pin_memory=True)\ntest_loader = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn,\n num_workers=args.num_workers, pin_memory=True)\nsynth_loader = DataLoader(test_dataset, batch_size=1, collate_fn=collate_fn_synthesize,\n num_workers=args.num_workers, pin_memory=True)\n\n\ndef build_model():\n model = WaveFlow(in_channel=1,\n cin_channel=args.cin_channels,\n res_channel=args.res_channels,\n n_height=args.n_height,\n n_flow=args.n_flow,\n n_layer=args.n_layer,\n layers_per_dilation_h_cycle=args.n_layer_per_cycle,\n )\n return model\n\n\ndef synthesize(model):\n global global_step\n model.eval()\n for batch_idx, (x, c) in enumerate(synth_loader):\n if batch_idx < args.num_samples:\n x, c = x.to(device), c.to(device)\n\n start_time = time.time()\n with torch.no_grad():\n y_gen = model.reverse(c, args.temp).squeeze()\n\n wav = y_gen.to(torch.device(\"cpu\")).data.numpy()\n wav_name = '{}/{}/generate_{}_{}.wav'.format(args.sample_path, args.model_name, global_step, batch_idx)\n print('{} seconds'.format(time.time() - start_time))\n librosa.output.write_wav(wav_name, wav, sr=22050)\n print('{} Saved!'.format(wav_name))\n\n\ndef load_checkpoint(step, model):\n checkpoint_path = os.path.join(args.param_path, args.model_name, \"checkpoint_step{:09d}.pth\".format(step))\n print(\"Load checkpoint from: {}\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n # generalized load procedure for both single-gpu and DataParallel models\n # https://discuss.pytorch.org/t/solved-keyerror-unexpected-key-module-encoder-embedding-weight-in-state-dict/1686/3\n try:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n except RuntimeError:\n print(\"INFO: this model is trained with DataParallel. 
Creating new state_dict without module...\")\n state_dict = checkpoint[\"state_dict\"]\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n return model\n\n\nif __name__ == \"__main__\":\n step = args.load_step\n global_step = step\n model = build_model()\n model = load_checkpoint(step, model)\n model = model.to(device)\n model.eval()\n\n with torch.no_grad():\n synthesize(model)\n","sub_path":"synthesize.py","file_name":"synthesize.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
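The try/except fallback in load_checkpoint() exists because torch.nn.DataParallel registers the wrapped network as .module, so every key in a checkpoint saved from the wrapper carries a 'module.' prefix. Factored out, the same idea looks like this (a sketch; the startswith guard also makes it a no-op on checkpoints saved without the wrapper):

from collections import OrderedDict

def strip_dataparallel_prefix(state_dict):
    # Remove the 'module.' prefix that torch.nn.DataParallel adds to every
    # parameter name, so the dict loads into the bare single-GPU module.
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        name = key[len("module."):] if key.startswith("module.") else key
        new_state_dict[name] = value
    return new_state_dict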
+{"seq_id":"133462242","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Sahana Eden Budget Model\n\n @copyright: 2009-2013 (c) Sahana Software Foundation\n @license: MIT\n\n Permission is hereby granted, free of charge, to any person\n obtaining a copy of this software and associated documentation\n files (the \"Software\"), to deal in the Software without\n restriction, including without limitation the rights to use,\n copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the\n Software is furnished to do so, subject to the following\n conditions:\n\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n__all__ = [\"S3BudgetModel\",\n \"S3BudgetKitModel\",\n \"S3BudgetBundleModel\",\n \"budget_rheader\",\n ]\n\nfrom gluon import *\nfrom gluon.dal import Row\nfrom gluon.storage import Storage\nfrom ..s3 import *\nfrom s3layouts import S3AddResourceLink\n\n# =============================================================================\nclass S3BudgetModel(S3Model):\n\n names = [\"budget_budget\",\n \"budget_parameter\",\n \"budget_location\",\n \"budget_budget_id\",\n \"budget_location_id\",\n \"budget_staff\",\n \"budget_budget_staff\",\n \"budget_staff_id\",\n ]\n\n def model(self):\n\n T = current.T\n configure = self.configure\n define_table = self.define_table\n add_components = self.add_components\n\n s3 = current.response.s3\n crud_strings = s3.crud_strings\n\n db = current.db\n\n # ---------------------------------------------------------------------\n # Budgets\n #\n tablename = \"budget_budget\"\n table = define_table(tablename,\n Field(\"name\",\n length = 128,\n notnull = True,\n unique = True,\n #requires = [IS_NOT_EMPTY(),\n #IS_NOT_ONE_OF(db, \"%s.name\" % tablename),\n #],\n label = T(\"Name\"),\n ),\n Field(\"description\",\n label = T(\"Description\"),\n ),\n Field(\"total_onetime_costs\", \"double\",\n label = T(\"Total One-time Costs\"), \n writable = False,\n ),\n Field(\"total_recurring_costs\", \"double\",\n label = T(\"Total Recurring Costs\"),\n writable = False,\n ),\n s3_comments(),\n *s3_meta_fields()\n )\n\n # CRUD Strings\n ADD_BUDGET = T(\"Add Budget\")\n crud_strings[tablename] = Storage(\n title_create = ADD_BUDGET,\n title_display = T(\"Budget Details\"),\n title_list = T(\"Budgets\"),\n title_update = T(\"Edit Budget\"),\n title_search = T(\"Search Budgets\"),\n subtitle_create = T(\"Add New Budget\"),\n label_list_button = T(\"List Budgets\"),\n label_create_button = ADD_BUDGET,\n label_delete_button = T(\"Delete Budget\"),\n msg_record_created = T(\"Budget added\"),\n msg_record_modified = T(\"Budget updated\"),\n msg_record_deleted = T(\"Budget deleted\"),\n msg_list_empty = T(\"No Budgets currently registered\"),\n )\n\n # Represent\n budget_budget_represent = S3Represent(lookup=tablename)\n\n # Reusable Field\n budget_budget_id = S3ReusableField(\"budget_id\", table,\n requires = IS_ONE_OF(db, \"budget_budget.id\",\n 
budget_budget_represent,\n ),\n represent = budget_budget_represent,\n label = T(\"Budget\"),\n comment = S3AddResourceLink(\n c = \"budget\",\n f = \"budget\",\n label = T(\"Add Budget\"),\n title = T(\"Budget\"),\n tooltip = T(\"You can add a new budget by clicking link 'Add Budget'.\")\n ),\n ondelete = \"CASCADE\",\n )\n\n add_components(tablename,\n # Staff\n budget_staff={\"link\": \"budget_budget_staff\",\n \"joinby\": \"budget_id\",\n \"key\": \"staff_id\",\n \"actuate\": \"link\",\n },\n # Bundles\n budget_bundle={\"link\": \"budget_budget_bundle\",\n \"joinby\": \"budget_id\",\n \"key\": \"bundle_id\",\n \"actuate\": \"link\",\n },\n )\n\n # @todo: budget_budget_onaccept?\n\n # ---------------------------------------------------------------------\n # Parameters (unused?)\n #\n tablename = \"budget_parameter\"\n table = define_table(tablename,\n Field(\"shipping\", \"double\",\n default = 15.0,\n requires = IS_FLOAT_IN_RANGE(0, 100),\n notnull = True,\n label = T(\"Shipping cost\"),\n ),\n Field(\"logistics\", \"double\",\n default = 0.0,\n requires = IS_FLOAT_IN_RANGE(0, 100),\n notnull = True,\n label = T(\"Procurement & Logistics cost\"),\n ),\n Field(\"admin\", \"double\",\n default = 0.0,\n requires = IS_FLOAT_IN_RANGE(0, 100),\n notnull = True,\n label = T(\"Administrative support cost\"),\n ),\n Field(\"indirect\", \"double\",\n default = 7.0,\n requires = IS_FLOAT_IN_RANGE(0, 100),\n notnull = True,\n label = T(\"Indirect support cost HQ\"),\n ),\n *s3_meta_fields())\n\n # CRUD Strings\n crud_strings[tablename] = Storage(\n title_update = T(\"Edit Parameters\"),\n title_display = T(\"Parameters\")\n )\n\n # ---------------------------------------------------------------------\n # Locations\n #\n tablename = \"budget_location\"\n table = define_table(tablename,\n Field(\"code\",\n length = 3,\n notnull = True,\n unique = True,\n #requires = [IS_NOT_EMPTY(),\n #IS_NOT_ONE_OF(db, \"%s.code\" % tablename),\n #],\n label = T(\"Code\"),\n ),\n Field(\"description\",\n label = T(\"Description\"),\n ),\n Field(\"subsistence\", \"double\",\n default = 0.0,\n label = T(\"Subsistence Cost\"),\n # UN terminology:\n #label = \"DSA\",\n ),\n Field(\"hazard_pay\", \"double\",\n default = 0.0,\n label = T(\"Hazard Pay\"),\n ),\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD Strings\n ADD_LOCATION = T(\"Add Location\")\n crud_strings[tablename] = Storage(\n title_create = ADD_LOCATION,\n title_display = T(\"Location Details\"),\n title_list = T(\"Locations\"),\n title_update = T(\"Edit Location\"),\n title_search = T(\"Search Locations\"),\n subtitle_create = T(\"Add New Location\"),\n label_list_button = T(\"List Locations\"),\n label_create_button = ADD_LOCATION,\n label_delete_button = T(\"Delete Location\"),\n msg_record_created = T(\"Location added\"),\n msg_record_modified = T(\"Location updated\"),\n msg_record_deleted = T(\"Location deleted\"),\n msg_list_empty = T(\"No Locations currently registered\"),\n )\n\n # Represent\n budget_location_represent = S3Represent(lookup=tablename,\n fields=[\"code\"])\n\n # Reusable Field\n budget_location_id = S3ReusableField(\"location_id\", table,\n requires = IS_ONE_OF(db, \"budget_location.id\",\n budget_location_represent,\n ),\n represent = budget_location_represent,\n label = T(\"Location\"),\n comment = S3AddResourceLink(\n c = \"budget\",\n f = \"location\",\n label = T(\"Add Location\"),\n title = T(\"Location\"),\n tooltip = T(\"You can add a new location by clicking link 'Add Location'.\")\n ),\n ondelete = \"CASCADE\",\n )\n\n # 
@todo: have an onaccept to update all budgets with\n # staff at this location\n\n # ---------------------------------------------------------------------\n # Staff Types\n #\n tablename = \"budget_staff\"\n table = define_table(tablename,\n Field(\"name\", length=128,\n notnull=True,\n unique=True,\n #requires = [IS_NOT_EMPTY(),\n #IS_NOT_ONE_OF(db, \"%s.name\" % tablename),\n #],\n label = T(\"Name\"),\n ),\n Field(\"grade\",\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Grade\"),\n ),\n Field(\"salary\", \"integer\",\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Monthly Salary\"),\n ),\n s3_currency(),\n Field(\"travel\", \"integer\",\n default=0,\n label = T(\"Travel Cost\"),\n ),\n # Shouldn't be grade-dependent, but purely\n # location-dependent\n #Field(\"subsistence\", \"double\",\n # default=0.00,\n # ),\n # Location-dependent\n #Field(\"hazard_pay\", \"double\",\n # default=0.00,\n # ),\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD Strings\n ADD_STAFF_TYPE = T(\"Add Staff Type\")\n crud_strings[tablename] = Storage(\n title_create = ADD_STAFF_TYPE,\n title_display = T(\"Staff Type Details\"),\n title_list = T(\"Staff Types\"),\n title_update = T(\"Edit Staff Type\"),\n title_search = T(\"Search Staff Types\"),\n subtitle_create = T(\"Add New Staff Type\"),\n label_list_button = T(\"List Staff Types\"),\n label_create_button = ADD_STAFF_TYPE,\n label_delete_button = T(\"Delete Staff Type\"),\n msg_record_created = T(\"Staff Type added\"),\n msg_record_modified = T(\"Staff Type updated\"),\n msg_record_deleted = T(\"Staff Type deleted\"),\n msg_list_empty = T(\"No Staff Types currently registered\"),\n )\n\n # Represent\n budget_staff_represent = S3Represent(lookup=tablename,\n fields=[\"name\"])\n\n # Reusable Field\n budget_staff_id = S3ReusableField(\"staff_id\", table,\n requires = IS_ONE_OF(db, \"budget_staff.id\",\n budget_staff_represent,\n ),\n represent = budget_staff_represent,\n label = T(\"Staff\"),\n comment = S3AddResourceLink(\n c = \"budget\",\n f = \"staff\",\n label = T(\"Add Staff\"),\n title = T(\"Staff\"),\n tooltip = T(\"You can add new staff by clicking link 'Add Staff'.\")\n ),\n ondelete = \"RESTRICT\",\n )\n\n # @todo: have an onaccept to update totals of all\n # budgets with this staff type\n\n # ---------------------------------------------------------------------\n # Budget<>Staff Many2Many @todo: cleanup\n #\n tablename = \"budget_budget_staff\"\n table = define_table(tablename,\n budget_budget_id(),\n self.project_project_id(),\n budget_location_id(),\n budget_staff_id(),\n Field(\"quantity\", \"integer\",\n requires = IS_NOT_EMPTY(),\n label = T(\"Quantity\"),\n default=1,\n notnull=True,\n ),\n Field(\"months\", \"integer\",\n requires = IS_NOT_EMPTY(),\n label = T(\"Months\"),\n default=3,\n notnull=True,\n ),\n *s3_meta_fields())\n\n # @todo: have an onaccept to update the totals in the budget\n\n # ---------------------------------------------------------------------\n # Pass names back to global scope (s3.*)\n #\n return dict(budget_budget_id = budget_budget_id,\n budget_location_id = budget_location_id,\n budget_staff_id=budget_staff_id,\n )\n\n # -------------------------------------------------------------------------\n def defaults(self):\n \"\"\"\n Safe defaults for model-global names in case module is disabled\n \"\"\"\n\n budget_budget_id = S3ReusableField(\"budget_id\", \"integer\",\n readable=False,\n writable=False)\n budget_location_id = S3ReusableField(\"location_id\", \"integer\",\n readable=False,\n 
writable=False)\n budget_staff_id = S3ReusableField(\"staff_id\", \"integer\",\n readable=False,\n writable=False)\n \n return dict(budget_budget_id = budget_budget_id,\n budget_location_id = budget_location_id,\n budget_staff_id=budget_staff_id,\n )\n\n# =============================================================================\nclass S3BudgetKitModel(S3Model):\n\n names = [\"budget_kit\",\n \"budget_item\",\n \"budget_kit_item\",\n \"budget_kit_id\",\n \"budget_item_id\",\n ]\n \n def model(self):\n\n T = current.T\n configure = self.configure\n define_table = self.define_table\n add_components = self.add_components\n\n s3 = current.response.s3\n crud_strings = s3.crud_strings\n \n UNKNOWN_OPT = current.messages.UNKNOWN_OPT\n \n db = current.db\n \n # ---------------------------------------------------------------------\n # Kits\n #\n tablename = \"budget_kit\"\n table = define_table(tablename,\n Field(\"code\",\n length=128,\n notnull=True,\n unique=True,\n #requires = [IS_NOT_EMPTY(),\n #IS_NOT_ONE_OF(db, \"%s.code\" % tablename),\n #],\n label = T(\"Code\"),\n ),\n Field(\"description\",\n label = T(\"Description\"),\n ),\n Field(\"total_unit_cost\", \"double\",\n writable=False,\n label = T(\"Total Unit Cost\"),\n ),\n Field(\"total_monthly_cost\", \"double\",\n writable=False,\n label = T(\"Total Monthly Cost\"),\n ),\n Field(\"total_minute_cost\", \"double\",\n writable=False,\n label = T(\"Total Cost per Minute\"),\n ),\n Field(\"total_megabyte_cost\", \"double\",\n writable=False,\n label = T(\"Total Cost per Megabyte\"),\n ),\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD Strings\n ADD_KIT = T(\"Add Kit\")\n crud_strings[tablename] = Storage(\n title_create = ADD_KIT,\n title_display = T(\"Kit Details\"),\n title_list = T(\"Kits\"),\n title_update = T(\"Edit Kit\"),\n title_search = T(\"Search Kits\"),\n subtitle_create = T(\"Add New Kit\"),\n label_list_button = T(\"List Kits\"),\n label_create_button = ADD_KIT,\n label_delete_button = T(\"Delete Kit\"),\n msg_record_created = T(\"Kit added\"),\n msg_record_modified = T(\"Kit updated\"),\n msg_record_deleted = T(\"Kit deleted\"),\n msg_list_empty = T(\"No Kits currently registered\"),\n )\n\n # Represent\n budget_kit_represent = S3Represent(lookup=tablename, fields=[\"code\"])\n \n # Reusable Field\n budget_kit_id = S3ReusableField(\"kit_id\", table,\n requires = IS_ONE_OF(db, \"budget_kit.id\",\n budget_kit_represent,\n ),\n represent = budget_kit_represent,\n label = T(\"Kit\"),\n comment = S3AddResourceLink(\n c = \"budget\",\n f = \"kit\",\n label = T(\"Add kit\"),\n title = T(\"Kit\"),\n tooltip = T(\"You can add a new kit by clicking link 'Add Kit'.\")\n ),\n ondelete = \"RESTRICT\",\n )\n\n # Components\n add_components(tablename,\n # Items\n budget_item={\"link\": \"budget_kit_item\",\n \"joinby\": \"kit_id\",\n \"key\": \"item_id\",\n \"actuate\": \"link\",\n },\n )\n\n # ---------------------------------------------------------------------\n # Items @todo: cleanup\n #\n budget_cost_type_opts = {1:T(\"One-time\"),\n 2:T(\"Recurring\"),\n }\n \n budget_category_type_opts = {1:T(\"Consumable\"),\n 2:T(\"Satellite\"),\n 3:\"HF\",\n 4:\"VHF\",\n 5:T(\"Telephony\"),\n 6:\"WLAN\",\n 7:T(\"Network\"),\n 8:T(\"Generator\"),\n 9:T(\"Electrical\"),\n 10:T(\"Vehicle\"),\n 11:\"GPS\",\n 12:T(\"Tools\"),\n 13:\"IT\",\n 14:\"ICT\",\n 15:\"TC\",\n 16:T(\"Stationery\"),\n 17:T(\"Relief\"),\n 18:T(\"Miscellaneous\"),\n 19:T(\"Running Cost\"),\n }\n \n tablename = \"budget_item\"\n table = define_table(tablename,\n 
Field(\"category_type\", \"integer\",\n notnull=True,\n requires = IS_IN_SET(budget_category_type_opts, zero=None),\n #default = 1,\n label = T(\"Category\"),\n represent = lambda opt: \\\n budget_category_type_opts.get(opt, UNKNOWN_OPT)\n ),\n Field(\"code\",\n length=128,\n notnull=True,\n unique=True,\n #requires = [IS_NOT_EMPTY(),\n #IS_NOT_ONE_OF(db, \"%s.code\" % tablename),\n #],\n label = T(\"Code\"),\n ),\n Field(\"description\",\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Description\"),\n ),\n Field(\"cost_type\", \"integer\",\n notnull=True,\n requires = IS_IN_SET(budget_cost_type_opts,\n zero=None),\n #default = 1,\n label = T(\"Cost Type\"),\n represent = lambda opt: \\\n budget_cost_type_opts.get(opt, UNKNOWN_OPT)\n ),\n Field(\"unit_cost\", \"double\",\n default=0.00,\n label = T(\"Unit Cost\"),\n ),\n Field(\"monthly_cost\", \"double\",\n default=0.00,\n label = T(\"Monthly Cost\"),\n ),\n Field(\"minute_cost\", \"double\",\n default=0.00,\n label = T(\"Cost per Minute\"),\n ),\n Field(\"megabyte_cost\", \"double\",\n default=0.00,\n label = T(\"Cost per Megabyte\"),\n ),\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD Strings\n ADD_ITEM = T(\"Add Item\")\n crud_strings[tablename] = Storage(\n title_create = ADD_ITEM,\n title_display = T(\"Item Details\"),\n title_list = T(\"Items\"),\n title_update = T(\"Edit Item\"),\n title_search = T(\"Search Items\"),\n subtitle_create = T(\"Add New Item\"),\n label_list_button = T(\"List Items\"),\n label_create_button = ADD_ITEM,\n label_delete_button = T(\"Delete Item\"),\n label_search_button = T(\"Search Items\"),\n msg_record_created = T(\"Item added\"),\n msg_record_modified = T(\"Item updated\"),\n msg_record_deleted = T(\"Item deleted\"),\n msg_list_empty = T(\"No Items currently registered\"),\n )\n\n # Configuration\n configure(tablename,\n onaccept=budget_kit_onaccept,\n main=\"code\",\n extra=\"description\",\n orderby=table.category_type,\n )\n\n # Represent\n budget_item_represent = S3Represent(lookup=tablename,\n fields=[\"description\"])\n\n # Reusable Field\n budget_item_id = S3ReusableField(\"item_id\", table,\n requires = IS_ONE_OF(db, \"budget_item.id\",\n budget_item_represent,\n ),\n represent = budget_item_represent,\n label = T(\"Item\"),\n comment = S3AddResourceLink(\n c = \"budget\",\n f = \"item\",\n label = T(\"Add item\"),\n title = T(\"Item\"),\n tooltip = T(\"You can add a new item by clicking link 'Add Item'.\")\n ),\n ondelete = \"RESTRICT\",\n )\n\n # @todo: have an onaccept to update all kits and bundles with this item\n\n # ---------------------------------------------------------------------\n # Kit<>Item Many2Many\n #\n tablename = \"budget_kit_item\"\n table = define_table(tablename,\n budget_kit_id(),\n budget_item_id(),\n Field(\"quantity\", \"integer\",\n default=1,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Quantity\"),\n ),\n *s3_meta_fields())\n\n configure(tablename,\n onaccept = self.kit_item_onaccept,\n )\n\n # ---------------------------------------------------------------------\n # Pass names back to global scope (s3.*)\n #\n return dict(budget_kit_id = budget_kit_id,\n budget_item_id = budget_item_id,\n )\n\n # -------------------------------------------------------------------------\n @staticmethod\n def kit_item_onaccept(form):\n \"\"\"\n When an Item is updated, then also need to update all Kits,\n Bundles & Budgets which contain this item\n\n @todo: cleanup, rename into item_onaccept\n \"\"\"\n\n db = current.db\n s3db = current.s3db\n \n # Check 
if we're an update form\n if form.vars.id:\n item = form.vars.id\n \n # Update Kits containing this Item\n table = s3db.budget_kit_item\n query = table.item_id==item\n rows = db(query).select()\n \n for row in rows:\n kit = row.kit_id\n budget_kit_totals(kit)\n \n # Update Bundles containing this Kit\n table = s3db.budget_bundle_kit\n query = (table.kit_id == kit)\n rows = db(query).select()\n for row in rows:\n bundle = row.bundle_id\n budget_bundle_totals(bundle)\n # Update Budgets containing this Bundle (tbc)\n \n # Update Bundles containing this Item\n table = s3db.budget_bundle_item\n query = (table.item_id == item)\n rows = db(query).select()\n for row in rows:\n bundle = row.bundle_id\n budget_bundle_totals(bundle)\n # Update Budgets containing this Bundle (tbc)\n return\n\n# =============================================================================\nclass S3BudgetBundleModel(S3Model):\n \"\"\" Model for Budget Bundles \"\"\"\n\n names = [\"budget_bundle\",\n \"budget_bundle_kit\",\n \"budget_bundle_item\",\n \"budget_budget_bundle\",\n \"budget_bundle_id\",\n ]\n\n def model(self):\n\n T = current.T\n configure = self.configure\n define_table = self.define_table\n add_components = self.add_components\n\n s3 = current.response.s3\n crud_strings = s3.crud_strings\n\n db = current.db\n \n # ---------------------------------------------------------------------\n # Bundles\n #\n tablename = \"budget_bundle\"\n table = define_table(tablename,\n Field(\"name\",\n length=128,\n notnull=True,\n unique=True,\n #requires = [IS_NOT_EMPTY(),\n #IS_NOT_ONE_OF(db, \"%s.name\" % tablename),\n #],\n label = T(\"Name\"),\n ),\n Field(\"description\",\n label = T(\"Description\"),\n ),\n Field(\"total_unit_cost\", \"double\",\n writable=False,\n label = T(\"One time cost\"),\n ),\n Field(\"total_monthly_cost\", \"double\",\n writable=False,\n label = T(\"Recurring cost\"),\n ),\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD Strings\n ADD_BUNDLE = T(\"Add Bundle\")\n crud_strings[tablename] = Storage(\n title_create = ADD_BUNDLE,\n title_display = T(\"Bundle Details\"),\n title_list = T(\"Bundles\"),\n title_update = T(\"Edit Bundle\"),\n title_search = T(\"Search Bundles\"),\n subtitle_create = T(\"Add New Bundle\"),\n label_list_button = T(\"List Bundles\"),\n label_create_button = ADD_BUNDLE,\n label_delete_button = T(\"Delete Bundle\"),\n msg_record_created = T(\"Bundle added\"),\n msg_record_modified = T(\"Bundle updated\"),\n msg_record_deleted = T(\"Bundle deleted\"),\n msg_list_empty = T(\"No Bundles currently registered\"),\n )\n\n # Configuration\n configure(tablename,\n onaccept = self.budget_bundle_onaccept)\n\n # Components\n add_components(tablename,\n # Items\n budget_item={\"link\": \"budget_bundle_item\",\n \"joinby\": \"bundle_id\",\n \"key\": \"item_id\",\n \"actuate\": \"link\",\n },\n # Kits\n budget_kit={\"link\": \"budget_bundle_kit\",\n \"joinby\": \"bundle_id\",\n \"key\": \"kit_id\",\n \"actuate\": \"link\",\n },\n )\n\n # Represent\n budget_bundle_represent = S3Represent(lookup=tablename,\n fields=[\"name\"])\n\n # Reusable Field\n budget_bundle_id = S3ReusableField(\"bundle_id\", table,\n requires = IS_ONE_OF(db, \"budget_bundle.id\",\n budget_bundle_represent,\n ),\n represent = budget_bundle_represent,\n label = T(\"Bundle\"),\n comment = S3AddResourceLink(\n c = \"budget\",\n f = \"bundle\",\n label = T(\"Add Bundle\"),\n title = T(\"Bundle\"),\n tooltip = T(\"You can add a new bundle by clicking link 'Add Bundle'.\")\n ),\n ondelete = \"RESTRICT\",\n )\n\n # 
---------------------------------------------------------------------\n # Bundle<>Kit Many2Many\n #\n tablename = \"budget_bundle_kit\"\n table = define_table(tablename,\n budget_bundle_id(),\n self.budget_kit_id(),\n Field(\"quantity\", \"integer\",\n default=1,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Quantity\"),\n ),\n Field(\"minutes\", \"integer\",\n default=0,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Minutes per Month\"),\n ),\n Field(\"megabytes\", \"integer\",\n default=0,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Megabytes per Month\"),\n ),\n *s3_meta_fields())\n\n # @todo: CRUD Strings\n\n # Configuration\n configure(tablename,\n onaccept = self.budget_bundle_kit_onaccept,\n ondelete = self.budget_bundle_kit_ondelete)\n \n # ---------------------------------------------------------------------\n # Bundle<>Item Many2Many\n #\n tablename = \"budget_bundle_item\"\n table = define_table(tablename,\n budget_bundle_id(),\n self.budget_item_id(),\n Field(\"quantity\", \"integer\",\n default=1,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Quantity\"),\n ),\n Field(\"minutes\", \"integer\",\n default=0,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Minutes per Month\"),\n ),\n Field(\"megabytes\", \"integer\",\n default=0,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Megabytes per Month\"),\n ),\n *s3_meta_fields())\n\n # @todo: CRUD Strings\n\n # Configuration\n configure(tablename,\n onaccept = self.budget_bundle_item_onaccept,\n ondelete = self.budget_bundle_item_ondelete)\n\n # ---------------------------------------------------------------------\n # Budget<>Bundle Many2Many\n #\n tablename = \"budget_budget_bundle\"\n table = define_table(tablename,\n self.budget_budget_id(),\n self.project_project_id(),\n self.budget_location_id(),\n budget_bundle_id(),\n Field(\"quantity\", \"integer\",\n default=1,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Quantity\"),\n ),\n Field(\"months\", \"integer\",\n default=3,\n notnull=True,\n requires = IS_NOT_EMPTY(),\n label = T(\"Months\"),\n ),\n *s3_meta_fields())\n\n # @todo: CRUD Strings\n\n # Configuration\n configure(tablename,\n onaccept = self.budget_budget_bundle_onaccept,\n ondelete = self.budget_budget_bundle_ondelete)\n\n # ---------------------------------------------------------------------\n # Pass names back to global scope (s3.*)\n #\n return dict(budget_bundle_id=budget_bundle_id,\n )\n\n # -------------------------------------------------------------------------\n # @todo: safe defaults\n\n # -------------------------------------------------------------------------\n @staticmethod\n def budget_bundle_onaccept(form):\n \"\"\"\n Calculate totals for the bundle\n \"\"\"\n try:\n bundle_id = form.vars.id\n except:\n return\n budget_bundle_totals(bundle_id)\n return\n\n # -------------------------------------------------------------------------\n @staticmethod\n def budget_bundle_item_onaccept(form):\n \"\"\"\n Bundle item has been updated => update totals of the bundle\n \"\"\"\n\n try:\n bundle_id = form.vars.bundle_id\n except:\n return\n budget_bundle_totals(bundle_id)\n return\n\n # -------------------------------------------------------------------------\n @staticmethod\n def budget_bundle_item_ondelete(row):\n \"\"\"\n Bundle item has been deleted => update totals of the bundle\n \"\"\"\n\n db = current.db\n linktable = current.s3db.budget_bundle_item\n try:\n record_id = row.id\n except:\n return\n link = 
db(linktable.id == record_id).select(linktable.deleted_fk,\n                                                    limitby=(0, 1)).first()\n        if link:\n            deleted_fk = json.loads(link.deleted_fk)\n            bundle_id = deleted_fk.get(\"bundle_id\")\n            if bundle_id:\n                budget_bundle_totals(bundle_id)\n        return\n\n    # -------------------------------------------------------------------------\n    @staticmethod\n    def budget_bundle_kit_onaccept(form):\n        \"\"\"\n            Bundle kit has been updated => update totals of the bundle\n        \"\"\"\n\n        try:\n            bundle_id = form.vars.bundle_id\n        except:\n            return\n        budget_bundle_totals(bundle_id)\n        return\n\n    # -------------------------------------------------------------------------\n    @staticmethod\n    def budget_bundle_kit_ondelete(row):\n        \"\"\"\n            Bundle kit has been deleted => update totals of the bundle\n        \"\"\"\n\n        db = current.db\n        linktable = current.s3db.budget_bundle_kit\n        try:\n            record_id = row.id\n        except:\n            return\n        link = db(linktable.id == record_id).select(linktable.deleted_fk,\n                                                    limitby=(0, 1)).first()\n        if link:\n            deleted_fk = json.loads(link.deleted_fk)\n            bundle_id = deleted_fk.get(\"bundle_id\")\n            if bundle_id:\n                budget_bundle_totals(bundle_id)\n        return\n\n    # -------------------------------------------------------------------------\n    @staticmethod\n    def budget_budget_bundle_onaccept(form):\n        \"\"\"\n            Budget bundle has been updated => update totals of the budget\n        \"\"\"\n\n        try:\n            budget_id = form.vars.budget_id\n        except:\n            return\n        budget_budget_totals(budget_id)\n        return\n\n    # -------------------------------------------------------------------------\n    @staticmethod\n    def budget_budget_bundle_ondelete(row):\n        \"\"\"\n            Budget bundle has been deleted => update totals of the budget\n        \"\"\"\n\n        db = current.db\n        linktable = current.s3db.budget_budget_bundle\n        try:\n            record_id = row.id\n        except:\n            return\n        link = db(linktable.id == record_id).select(linktable.deleted_fk,\n                                                    limitby=(0, 1)).first()\n        if link:\n            deleted_fk = json.loads(link.deleted_fk)\n            budget_id = deleted_fk.get(\"budget_id\")\n            if budget_id:\n                budget_budget_totals(budget_id)\n        return\n\n# =============================================================================\ndef budget_budget_total(form):\n    \"\"\" Calculate Totals for the budget specified by Form \"\"\"\n\n    # @todo: rename into budget_budget_onaccept, move into model\n\n    if \"budget_id\" in form.vars:\n        # called by budget_staff_bundle()\n        budget = form.vars.budget_id\n    else:\n        # called by budget()\n        budget = form.vars.id\n    budget_budget_totals(budget)\n\n# =============================================================================\ndef budget_kit_onaccept(form):\n    \"\"\"\n        Calculate Totals for the Kit specified by Form\n    \"\"\"\n    # @todo: move into model\n    if \"kit_id\" in form.vars:\n        # called by kit_item()\n        kit = form.vars.kit_id\n    else:\n        # called by kit()\n        kit = form.vars.id\n    budget_kit_totals(kit)\n\n# =============================================================================\ndef budget_kit_totals(kit_id):\n    \"\"\"\n        Calculate Totals for a Kit\n    \"\"\"\n\n    db = current.db\n    s3db = current.s3db\n\n    # Lookup all item quantities in this kit\n    ltable = s3db.budget_kit_item\n    query = (ltable.kit_id == kit_id) & \\\n            (ltable.deleted == False)\n    items = db(query).select(ltable.item_id, ltable.quantity)\n    item_ids = set(item.item_id for item in items)\n\n    # Lookup the individual costs of each item\n    itable = s3db.budget_item\n    query = (itable.id.belongs(item_ids))\n    costs = db(query).select(itable.id,\n                             itable.unit_cost,\n                             itable.monthly_cost,\n                             itable.minute_cost,\n                             itable.megabyte_cost).as_dict()\n\n    # Calculate the totals per cost category\n    total_unit_cost = 0\n    total_monthly_cost = 0\n    total_minute_cost = 0\n    total_megabyte_cost = 0\n\n    for item in items:\n\n        quantity = item.quantity\n        item_costs = costs[item.item_id]\n\n        total_unit_cost += item_costs[\"unit_cost\"] * quantity\n        total_monthly_cost += item_costs[\"monthly_cost\"] * quantity\n        total_minute_cost += item_costs[\"minute_cost\"] * quantity\n        total_megabyte_cost += item_costs[\"megabyte_cost\"] * quantity\n\n    # Update the kit\n    ktable = s3db.budget_kit\n    db(ktable.id == kit_id).update(total_unit_cost=total_unit_cost,\n                                   total_monthly_cost=total_monthly_cost,\n                                   total_minute_cost=total_minute_cost,\n                                   total_megabyte_cost=total_megabyte_cost)\n\n    # Update totals in all bundles with this kit\n    linktable = s3db.budget_bundle_kit\n    bundle_id = linktable.bundle_id\n    rows = db(linktable.kit_id == kit_id).select(bundle_id,\n                                                 groupby=bundle_id)\n    for row in rows:\n        budget_bundle_totals(row.bundle_id)\n\n    # @todo: fix this\n    #audit(\"update\", module, \"kit\", record=kit, representation=\"html\")\n    return\n\n# =============================================================================\ndef budget_bundle_totals(bundle_id):\n    \"\"\"\n        Calculate Totals for a Bundle\n    \"\"\"\n\n    s3db = current.s3db\n    db = current.db\n\n    total_unit_cost = 0\n    total_monthly_cost = 0\n\n    # Calculate costs of kits\n    ktable = s3db.budget_kit\n    linktable = s3db.budget_bundle_kit\n    left = [ktable.on(linktable.kit_id == ktable.id)]\n    query = (linktable.bundle_id == bundle_id)\n    rows = db(query).select(linktable.quantity,\n                            linktable.minutes,\n                            linktable.megabytes,\n                            ktable.total_unit_cost,\n                            ktable.total_monthly_cost,\n                            ktable.total_minute_cost,\n                            ktable.total_megabyte_cost,\n                            left=left)\n    for row in rows:\n        kit = row[ktable]\n        link = row[linktable]\n        quantity = link.quantity\n\n        # One-time costs\n        total_unit_cost += kit.total_unit_cost * quantity\n\n        # Monthly costs\n        monthly_cost = kit.total_monthly_cost + \\\n                       kit.total_minute_cost * link.minutes + \\\n                       kit.total_megabyte_cost * link.megabytes\n        total_monthly_cost += monthly_cost * quantity\n\n    # Calculate costs of items (budget_item carries plain unit costs,\n    # not total_* fields)\n    itable = s3db.budget_item\n    linktable = s3db.budget_bundle_item\n    left = [itable.on(linktable.item_id == itable.id)]\n    query = (linktable.bundle_id == bundle_id)\n    rows = db(query).select(linktable.quantity,\n                            linktable.minutes,\n                            linktable.megabytes,\n                            itable.unit_cost,\n                            itable.monthly_cost,\n                            itable.minute_cost,\n                            itable.megabyte_cost,\n                            left=left)\n    for row in rows:\n        item = row[itable]\n        link = row[linktable]\n        quantity = link.quantity\n\n        # One-time costs\n        total_unit_cost += item.unit_cost * quantity\n\n        # Monthly costs\n        monthly_cost = item.monthly_cost + \\\n                       item.minute_cost * link.minutes + \\\n                       item.megabyte_cost * link.megabytes\n        total_monthly_cost += monthly_cost * quantity\n\n    # Update the bundle\n    btable = s3db.budget_bundle\n    db(btable.id == bundle_id).update(total_unit_cost=total_unit_cost,\n                                      total_monthly_cost=total_monthly_cost)\n\n    # Update totals of all budgets with this bundle\n    linktable = s3db.budget_budget_bundle\n    budget_id = linktable.budget_id\n    rows = db(linktable.bundle_id == bundle_id).select(budget_id,\n                                                       groupby=budget_id)\n    for row in rows:\n        budget_budget_totals(row.budget_id)\n\n    # @todo: fix this:\n    #audit(\"update\", module, \"bundle\", record=bundle, representation=\"html\")\n    return\n\n# 
=============================================================================\ndef budget_budget_totals(budget_id):\n \"\"\"\n Calculate Totals for a budget\n\n @param budget_id: the budget_budget record ID\n \"\"\"\n\n db = current.db\n s3db = current.s3db\n\n total_onetime_cost = 0\n total_recurring_cost = 0\n\n # Calculate staff costs\n stable = s3db.budget_staff\n ltable = s3db.budget_location\n \n linktable = s3db.budget_budget_staff\n\n left = [stable.on(linktable.staff_id == stable.id),\n ltable.on(linktable.location_id == ltable.id),\n ]\n query = (linktable.budget_id == budget_id)\n rows = db(query).select(linktable.quantity,\n linktable.months,\n stable.salary,\n stable.travel,\n ltable.subsistence,\n ltable.hazard_pay,\n left=left)\n\n for row in rows:\n quantity = row[linktable.quantity]\n\n # Travel costs are one time\n total_onetime_cost += row[stable.travel] * quantity\n\n # Recurring costs are monthly\n recurring_costs = row[stable.salary] + \\\n row[ltable.subsistence] + \\\n row[ltable.hazard_pay]\n total_recurring_cost += recurring_costs * \\\n quantity * \\\n row[linktable.months]\n\n # Calculate bundle costs\n btable = s3db.budget_bundle\n \n linktable = s3db.budget_budget_bundle\n\n left = [btable.on(linktable.bundle_id == btable.id)]\n\n query = (linktable.budget_id == budget_id)\n rows = db(query).select(linktable.quantity,\n linktable.months,\n btable.total_unit_cost,\n btable.total_monthly_cost,\n left=left)\n\n for row in rows:\n quantity = row[linktable.quantity]\n\n total_onetime_cost += row[btable.total_unit_cost] * \\\n quantity\n total_recurring_cost += row[btable.total_monthly_cost] * \\\n quantity * \\\n row[linktable.months]\n\n table = s3db.budget_budget\n db(table.id == budget_id).update(total_onetime_costs=total_onetime_cost,\n total_recurring_costs=total_recurring_cost)\n\n # @todo: fix this\n #audit(\"update\", module, \"budget\", record=budget, representation=\"html\")\n\n# =============================================================================\ndef budget_rheader(r):\n\n T = current.T\n if r.representation != \"html\":\n return None\n\n resourcename = r.name\n\n if resourcename == \"budget\":\n \n tabs = [(T(\"Basic Details\"), None),\n (T(\"Staff\"), \"staff\"),\n (T(\"Bundles\"), \"bundle\"),\n ]\n \n rheader_fields = [[\"name\"],\n [\"description\"],\n [\"total_onetime_costs\"],\n [\"total_recurring_costs\"],\n ]\n rheader = S3ResourceHeader(rheader_fields, tabs)(r)\n \n elif resourcename == \"bundle\":\n\n tabs = [(T(\"Basic Details\"), None),\n (T(\"Kits\"), \"kit\"),\n (T(\"Items\"), \"item\"),\n ]\n\n rheader_fields = [[\"name\"],\n [\"description\"],\n [\"total_unit_cost\"],\n [\"total_monthly_cost\"],\n ]\n rheader = S3ResourceHeader(rheader_fields, tabs)(r)\n\n elif resourcename == \"kit\":\n\n tabs = [(T(\"Basic Details\"), None),\n (T(\"Items\"), \"item\"),\n ]\n\n rheader_fields = [[\"code\"],\n [\"description\"],\n [\"total_unit_cost\"],\n [\"total_monthly_cost\"],\n ]\n rheader = S3ResourceHeader(rheader_fields, tabs)(r)\n\n return rheader\n \n# END =========================================================================\n","sub_path":"modules/s3db/budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":54626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
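The three ondelete hooks above all do the same lookup: web2py's soft delete keeps the deleted row and stashes its foreign keys as a JSON string in the deleted_fk column, so the parent record id has to be recovered from there before totals can be recomputed. A sketch of that pattern as a helper (get_deleted_fk is a hypothetical name, not part of the framework, and the pattern relies on the stdlib json module being imported at module level):

import json

def get_deleted_fk(db, linktable, record_id, fk_name):
    # web2py soft delete: the row still exists, but its foreign keys were
    # moved into the JSON-encoded deleted_fk column
    row = db(linktable.id == record_id).select(linktable.deleted_fk,
                                               limitby=(0, 1)).first()
    if not row or not row.deleted_fk:
        return None
    return json.loads(row.deleted_fk).get(fk_name)

With it, each ondelete body reduces to one lookup followed by the matching totals call.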
+{"seq_id":"559490682","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nfrom nltk.translate.bleu_score import sentence_bleu\nfrom nltk.translate.bleu_score import SmoothingFunction\n\n\n# In[ ]:\n\n\nimport csv\nimport json\n\npreds=[]\nrefs=[]\nauth_1 =[]\nauth_2 = []\nauth_3 =[]\nwith open('human_annotations.csv') as csvfile:\n ader = csv.reader(csvfile)\n for row in ader:\n refs.append(row[1])\n preds.append(row[0])\n auth_1.append(row[2])\n auth_2.append(row[3])\n auth_3.append(row[4])\n \nrefs_n = refs[:100]\npreds_n = preds[:100]\n\nauth_1 = auth_1[:100]\nfor i in range(0, len(auth_1)):\n auth_1[i] = int(auth_1[i])\n#print(auth_1)\n\nauth_2 = auth_2[:100]\nfor i in range(0, len(auth_2)):\n auth_2[i] = int(auth_2[i])\n#print(auth_2)\n\nauth_3 = auth_3[:100]\nfor i in range(0, len(auth_3)):\n auth_3[i] = int(auth_3[i])\n#print(auth_3)\n\n\n# In[ ]:\n\n\n#normalised author 1 scores\nnorm_auth_1 =[]\nmax1 = max(auth_1)\nmin1 = min(auth_1)\n\nfor a in range(len(auth_1)):\n norm_auth_1.append((auth_1[a])/(max1))\nprint(norm_auth_1)\n\n\n# In[ ]:\n\n\n#normalised author 2 scores\nnorm_auth_2 =[]\nmax1 = max(auth_2)\nmin1 = min(auth_2)\n\nfor a in range(len(auth_2)):\n norm_auth_2.append((auth_2[a])/(max1))\nprint(norm_auth_2)\n\n\n# In[ ]:\n\n\n#normalised author 3 scores\nnorm_auth_3 =[]\nmax1 = max(auth_3)\nmin1 = min(auth_3)\n\nfor a in range(len(auth_3)):\n norm_auth_3.append((auth_3[a])/(max1))\nprint(norm_auth_3)\n\n\n# In[ ]:\n\n\n#Average normalised author scores\navg_norm_score = []\nfor i in range(len(auth_1)):\n avg_norm_score.append(round((norm_auth_1[i]+norm_auth_2[i]+norm_auth_3[i])/3,2))\nprint(avg_norm_score)\n\n\n# # BLEU4 without smoothing\n\n# In[ ]:\n\n\nbleu4=[]\nfor i in range(len(auth_1)):\n bleu4.append(round(sentence_bleu([refs[i]], preds[i]),2))\nprint(bleu4)\n\n\n# In[ ]:\n\n\nnorm_bleu4 = []\n\nmax_b4=max(bleu4)\nmin_b4=min(bleu4)\n\nfor a in range(len(bleu4)):\n norm_bleu4.append(round(((bleu4[a])/(max_b4)),2))\nprint(norm_bleu4)\n\n\n# In[ ]:\n\n\nfrom scipy.stats import spearmanr\n# calculate spearman's correlation\ncoef, p = spearmanr(norm_bleu4, avg_norm_score)\nprint('Spearmans correlation coefficient: %.3f' % coef)\n# interpret the significance\nalpha = 0.05\nif p > alpha:\n\tprint('uncorrelated (fail to reject H0) p=%.3f' % p)\nelse:\n\tprint('correlated (reject H0) p=%.3f' % p)\n\n\n# # BLEU4 with smoothing\n\n# BLEUNorm \n\n# In[ ]:\n\n\nbleun=[]\nfor i in range(len(auth_1)):\n bleun.append(round(sentence_bleu([refs[i]], preds[i],smoothing_function=SmoothingFunction().method2),2))\nprint(bleun)\n\n\n# In[ ]:\n\n\nnorm_bleun = []\n\nmax_bn=max(bleun)\nmin_bn=min(bleun)\n\nfor a in range(len(bleun)):\n norm_bleun.append(round(((bleun[a])/(max_bn)),2))\nprint(norm_bleun)\n\n\n# In[ ]:\n\n\nfrom scipy.stats import spearmanr\n# calculate spearman's correlation\ncoef, p = spearmanr(norm_bleun, avg_norm_score)\nprint('Spearmans correlation coefficient: %.3f' % coef)\n# interpret the significance\nalpha = 0.05\nif p > alpha:\n\tprint('uncorrelated (fail to reject H0) p=%.3f' % p)\nelse:\n\tprint('correlated (reject H0) p=%.3f' % p)\n\n\n# BLEUCC\n\n# In[ ]:\n\n\nbleucc=[]\nfor i in range(len(auth_1)):\n bleucc.append(round(sentence_bleu([refs[i]], preds[i],smoothing_function=SmoothingFunction().method5),2))\nprint(bleucc)\n\n\n# In[ ]:\n\n\nnorm_bleucc = []\n\nmax_bcc=max(bleucc)\nmin_bcc=min(bleucc)\n\nfor a in range(len(bleucc)):\n norm_bleucc.append(round(((bleucc[a])/(max_bcc)),2))\nprint(norm_bleucc)\n\n\n# In[ 
]:\n\n\nfrom scipy.stats import spearmanr\n# calculate spearman's correlation\ncoef, p = spearmanr(norm_bleucc, avg_norm_score)\nprint('Spearmans correlation coefficient: %.3f' % coef)\n# interpret the significance\nalpha = 0.05\nif p > alpha:\n\tprint('uncorrelated (fail to reject H0) p=%.3f' % p)\nelse:\n\tprint('correlated (reject H0) p=%.3f' % p)\n\n","sub_path":"Experimental results/Effect of Smoothing.py","file_name":"Effect of Smoothing.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
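Every smoothing variant above repeats the same normalise-then-correlate cell, and the minima it computes (min1, min_b4, ...) are never used, since the normalisation divides by the maximum alone. A sketch factoring the repetition into two functions, under the same max-normalisation and alpha = 0.05 threshold (assumes at least one positive score, as in the cells above):

from scipy.stats import spearmanr

def normalize(scores):
    # max-normalisation, as used for both the author and the BLEU scores
    top = max(scores)
    return [round(s / top, 2) for s in scores]

def correlate(metric_scores, human_scores, alpha=0.05):
    coef, p = spearmanr(normalize(metric_scores), human_scores)
    verdict = 'correlated (reject H0)' if p <= alpha else 'uncorrelated (fail to reject H0)'
    return coef, p, verdict

Each pair of cells then collapses to a single call, e.g. correlate(bleu4, avg_norm_score).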
+{"seq_id":"566376262","text":"import sys\nfrom typing import List\nfrom typing import Tuple\n\nfrom utils.io import read_input_list\n\ndef find_seat(ticket: str, plane_size: Tuple[int, int]):\n # find row\n rows = list(range(plane_size[0]))\n row = binary_search(directions=ticket[:7], item_range=rows)\n \n # find col\n cols = list(range(plane_size[1]))\n col = binary_search(directions=ticket[-3:], item_range=cols)\n \n #return seat number\n return row * 8 + col\n\ndef binary_search(directions: str, item_range: List[int]):\n n_directions = len(directions)\n current_direction = directions[0]\n \n midpoint = int(len(item_range) / 2)\n \n # base case when down to last direction\n if n_directions == 1: \n if current_direction in {'F', 'L'}:\n return item_range[0]\n else:\n return item_range[1]\n\n # otherwise call binary_search recursively on applicable half of range with the\n # remaining directions\n else:\n if current_direction in {'F', 'L'}:\n return binary_search(directions=directions[1:], item_range=item_range[:midpoint])\n else:\n return binary_search(directions=directions[1:], item_range=item_range[midpoint:])\n\n\ndef part_one(input_file: str, plane_size: Tuple[int, int]) -> int:\n # read input file\n inputs = read_input_list(input_file, strip_new_line=True)\n\n # iterate through tickets and find the seat id\n # keep track of max seat id along the way\n max_seat_id = -float('inf')\n for ticket in inputs:\n seat_id = find_seat(ticket=ticket, plane_size=plane_size)\n if seat_id > max_seat_id:\n max_seat_id = seat_id\n\n return max_seat_id\n\n\ndef part_two(input_file: str, plane_size: Tuple[int, int]) -> int:\n # read input file\n inputs = read_input_list(input_file, strip_new_line=True)\n\n # collect known seats\n known_seats = [find_seat(ticket=ticket, plane_size=plane_size) for ticket in inputs]\n\n # for each seat check if there is an empty seat on either side of the seat\n # between another known seat\n for seat in known_seats:\n if seat - 1 not in known_seats and seat - 2 in known_seats:\n return seat - 1\n \n if seat + 1 not in known_seats and seat + 2 in known_seats:\n return seat + 1\n \n\ndef main():\n try:\n _, task = sys.argv\n except ValueError:\n task = None\n\n if task == 'part-one':\n result = part_one(input_file='inputs/05.txt', plane_size=(128, 8))\n elif task == 'part-two':\n result = part_two(input_file='inputs/05.txt', plane_size=(128, 8))\n else:\n print(f\"Must specify 'part-one' or 'part-two'. Usage: python {__file__} [part-one OR part-two]\")\n return\n \n print(\"Result:\", result)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2020/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
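The recursive binary_search mirrors the puzzle statement, but for the standard (128, 8) plane a ticket is just a 10-bit binary number: B and R pick the upper half of a range (bit 1), F and L the lower half (bit 0), and row * 8 + col equals all ten bits read at once. A sketch of that closed form (it assumes the usual 7+3 ticket layout, whereas the recursive version generalises to other plane sizes):

def seat_id(ticket: str) -> int:
    # 'FBFBBFFRLR' -> '0101100101' -> 357, matching row * 8 + col
    bits = ticket.translate(str.maketrans('FBLR', '0101'))
    return int(bits, 2)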
+{"seq_id":"99655360","text":"import data_mine as dm\n\nfrom data_mine.nlp.allen_ai_arc import ARCType\n\n\ndef main():\n    df = dm.ALLEN_AI_ARC(ARCType.TEST_EASY)\n    print(df)  # Prints the loaded DataFrame.\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"examples/nlp/allen_ai_arc/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"183368522","text":"import pandas as pd\nimport csv\n\ndef write_csv(filename,data):\n    \"\"\"function to create and write into a csv file\n\n    Args:\n        filename (string): name to be given to file\n        data (list): list of lists of strings etc\n    \"\"\"\n    with open(filename, 'w',encoding='utf-8') as outfile:\n        writer = csv.writer(outfile)\n        writer.writerow(data)\n\n\n\ndef iswebsite(l,outlist):\n    \"\"\"function to validate whether a website is working or not, possible outcomes: YES, MAYBE or NO.\n\n    Args:\n        l (list): list of websites to be checked\n        outlist (string): name of file where output is to be stored, output includes '(link,statuscode,result)'\n\n    Returns:\n        list: returns list of links that are not working\n    \"\"\"\n    import requests\n    headers = {\"User-Agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15\",\"Accept-Language\": \"en-gb\",\"Accept-Encoding\":\"br, gzip, deflate\",\"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\"Referer\":\"http://www.google.com/\"}\n    cnt_work=0  # count of links working\n    cnt_nt=0  # count of links not working\n    cnt_total=len(l)  # count of total no of links\n    cnt=0  # counts no of iterations\n    print(cnt_total)\n    exli=[]  # list of result tuples for outlist\n    wnt_list=[]  # list of URLs that dont work\n    for i in l:\n        xyz=None  # status code of the current link; stays None if no response arrives\n        # writes to file every 500 iterations so as to ensure logs\n        if cnt%500==0:\n            write_csv(outlist,exli)\n        try:\n            request = requests.get(i,headers=headers,verify=False)\n            xyz=request.status_code\n            print(xyz)\n            if request.status_code == 200:\n                print(cnt,'Web site exists')\n                cnt_work+=1\n                cnt+=1\n                exli.append((i,xyz,\"YES\"))\n            elif request.status_code == 403:\n                print(\"forbidden\")\n                cnt+=1\n                cnt_work +=1\n                exli.append((i,xyz,\"MAYBE\"))\n\n            elif request.status_code == 503:\n                print(\"server not able to handle request\")\n                cnt+=1\n                cnt_work+=1\n                wnt_list.append((i,xyz))\n                exli.append((i,xyz,\"MAYBE\"))\n\n            elif request.status_code == 302 or request.status_code == 303 or request.status_code == 301:\n\n                r=requests.get(i,headers=headers,verify=False)\n                r1=r.url\n                print(r1)\n                req=requests.head(r1,headers=headers,verify=False)\n\n                if req.status_code == 200:\n                    print(cnt,'Web site exists(redirected)')\n                    cnt_work+=1\n                    cnt+=1\n                    exli.append((i,xyz,\"YES\"))\n                else:\n                    print(cnt,'Web site does not exist(redirected)')\n                    print(\"error\",req.status_code)\n                    wnt_list.append((i,req.status_code))\n                    cnt_nt+=1\n                    cnt+=1\n                    exli.append((i,req.status_code,\"MAYBE\"))\n\n            else:\n\n                print(cnt,'Web site does not exist')\n                print(request.status_code)\n                cnt_nt+=1\n                wnt_list.append((i,xyz))\n                cnt+=1\n                exli.append((i,xyz,\"MAYBE\"))\n\n        except:\n\n            print(cnt,\"error\")\n            cnt+=1\n            exli.append((i,xyz,\"MAYBE\"))\n    write_csv(outlist,exli)\n    if cnt_work+cnt_nt==cnt_total:\n        w=True\n    else:\n        w=False\n    print(\"\\n working : \",cnt_work,\"\\n not working : \",cnt_nt,\"\\n total websites : \",cnt_total,\"\\n check sum :\",w)\n    return wnt_list\n\n\n\ndef batch_process(filename):\n    \"\"\"function to convert the output from the iswebsite() function into a proper table format.\n    Saves the new file under the same name as the input + 'processed' at the end, in the same directory.\n\n    Args:\n        filename (string): name of file from which input is to be taken (outlist arg from iswebsite() function)\n    \"\"\"\n\n    df=pd.read_csv(filename,header=None)\n\n    df=df.transpose()\n\n\n    link_list=[]\n    status_code=[]\n    work=[]\n    success=[]\n\n    for i in range(len(df)):\n        str1=str(df.iloc[i,0])\n        str1=str1[1:]\n        str1=str1[:-1]\n        str1=str1.replace(\"'\",\"\")\n        try:\n\n            link,status,wrk=str1.split(\",\")\n            link_list.append(link)\n            status_code.append(status)\n            work.append(wrk)\n            success.append(\"done\")\n\n        except:\n\n            link_list.append(str1[:-10])\n            status_code.append(str1[-9:-5])\n            work.append(str1[-4:])\n            success.append(\"maybe\")\n\n\n\n\n    df1=pd.DataFrame()\n    df1[\"n.name\"]=link_list\n    df1[\"statuscode\"]=status_code\n    df1[\"iswork?\"]=work\n    df1[\"success\"]=success\n\n    temp=str(filename)\n    temp=temp.replace(\".csv\",\"\")  # replace() returns a new string; assign it back\n    df1.to_csv(temp+\"processed.csv\")\n","sub_path":"validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
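The 301/302/303 branch in iswebsite rarely fires, because requests follows redirects by default and hands back the final status code. A compact per-URL check in the same spirit (a sketch rather than a drop-in replacement for the batch bookkeeping; catching requests.RequestException keeps a failed connection from ever reporting a stale or missing status code):

import requests

HEADERS = {'User-Agent': 'Mozilla/5.0'}

def check_url(url, timeout=10):
    # one (url, status_code, verdict) tuple per link, like the exli entries
    try:
        resp = requests.get(url, headers=HEADERS, timeout=timeout, verify=False)
    except requests.RequestException:
        return (url, None, 'NO')
    if resp.status_code == 200:
        return (url, resp.status_code, 'YES')
    return (url, resp.status_code, 'MAYBE')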
+{"seq_id":"474909873","text":"import threading\nimport sys\n\nfrom Client import Client\nfrom Server import Server\n\n#####################################################################\n# Main script to launch client and server. Run in the\n# following way:\n# python main.py [server_ip] [server_port] [client_ip] [client_port]\n# Pass -1 for any argument to use its default value.\n# Launches two threads, one client thread and one server thread.\n#####################################################################\n\n\nclass ClientThread(threading.Thread):\n    def __init__(self, ip, port):\n        threading.Thread.__init__(self)\n        self.ip = ip\n        self.port = port\n\n    def run(self):\n        c = Client(self.ip, self.port)\n        c.start()\n\n\nclass ServerThread(threading.Thread):\n    def __init__(self, ip, port):\n        threading.Thread.__init__(self)\n        self.ip = ip\n        self.port = port\n\n    def run(self):\n        s = Server(self.ip, self.port)\n        s.start()\n\n\nif __name__ == '__main__':\n    data = sys.argv[1:]\n    if len(data) < 4:\n        print('Usage: python main.py [server_ip] [server_port] [client_ip] [client_port]')\n        sys.exit(1)\n    # '-1' selects the default for an argument\n    if data[0] == '-1':\n        data[0] = 'localhost'\n    if data[1] == '-1':\n        data[1] = 10000\n    if data[2] == '-1':\n        data[2] = 'localhost'\n    if data[3] == '-1':\n        data[3] = 10000\n    s_ip = data[0]\n    s_port = int(data[1])\n    c_ip = data[2]\n    c_port = int(data[3])\n\n    st = ServerThread(s_ip, s_port)\n    st.start()\n    ct = ClientThread(c_ip, c_port)\n    ct.start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
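The '-1' sentinel convention works, but argparse provides the same defaults plus a generated --help screen. A sketch of an equivalent parser (hypothetical; the positional argument names are illustrative):

import argparse

def parse_args():
    # optional positionals: omitting an argument selects its default,
    # replacing the '-1' sentinel convention above
    parser = argparse.ArgumentParser(description='Launch client and server threads')
    parser.add_argument('server_ip', nargs='?', default='localhost')
    parser.add_argument('server_port', nargs='?', type=int, default=10000)
    parser.add_argument('client_ip', nargs='?', default='localhost')
    parser.add_argument('client_port', nargs='?', type=int, default=10000)
    return parser.parse_args()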
+{"seq_id":"587221140","text":"import json\n\nimport boto3\nimport dropbox\nimport requests\n\nfrom . import Config\n\nAWS_ACCESS_KEY = Config.getValue('AWS', 'AWS_ACCESS_KEY')\nAWS_SECRET_ACCESS_KEY = Config.getValue('AWS', 'AWS_SECRET_ACCESS_KEY')\n\n\ndef download_file(save_path, file_url):\n    \"\"\" Download file from an HTTP URL \"\"\"\n\n    r = requests.get(file_url)  # create HTTP response object\n    with open(save_path, 'wb') as f:\n        f.write(r.content)\n\n    return save_path\n\n\ndef download_file_from_dropbox(save_path, link):\n    \"\"\" Download file from Dropbox.\n    Requires a Dropbox app access token.\"\"\"\n\n    dbx = dropbox.Dropbox(\"Access Token\")\n    print(dbx.users_get_current_account())\n\n    return dbx.sharing_get_shared_link_file(url=link, path=save_path)\n\n\ndef upload_file_to_bucket(bucket, file_path, key, is_public=False):\n    \"\"\" Upload files to S3 Bucket \"\"\"\n    s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n\n    with open(file_path, 'rb') as data:\n        s3.upload_fileobj(data, bucket, key)\n\n    if is_public:\n        s3.put_object_acl(ACL='public-read', Bucket=bucket, Key=key)\n\n    file_url = '%s/%s/%s' % ('https://s3.ap-northeast-2.amazonaws.com', bucket, key)\n    return file_url\n\n\ndef download_file_from_bucket(bucket, file_path, key):\n    \"\"\" Download file from S3 Bucket \"\"\"\n    # create the client here: no module-level s3 client exists\n    s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n\n    with open(file_path, 'wb') as data:\n        s3.download_fileobj(bucket, key, data)\n    return file_path\n\n\ndef request_service(method, url, params):\n    \"\"\" Request to whatsit API server \"\"\"\n    if method == 'PUT':\n        resp = requests.put(url=url, json=params, timeout=60)\n    elif method == 'POST':\n        resp = requests.post(url=url, params=params, timeout=60)\n    else:\n        resp = requests.get(url=url, params=params, timeout=60)\n    print(resp.status_code)\n    return json.loads(resp.text)\n","sub_path":"util/Trans.py","file_name":"Trans.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
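upload_file_to_bucket makes an object world-readable by attaching a public-read ACL. When the file should stay private, a presigned URL grants time-limited access without touching the object's ACL. A sketch reusing the module's credential constants (the one-hour expiry is an arbitrary choice):

import boto3

def presigned_url(bucket, key, expires=3600):
    # time-limited GET link for a private object; assumes the module-level
    # AWS_ACCESS_KEY / AWS_SECRET_ACCESS_KEY constants defined above
    s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY,
                      aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    return s3.generate_presigned_url('get_object',
                                     Params={'Bucket': bucket, 'Key': key},
                                     ExpiresIn=expires)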
+{"seq_id":"319199282","text":"# !/usr/bin/eny python3\n# -*- coning: utf-8 -*-\n_author_ = '谭华锦'\n\n\n# 定义一个类,使用class关键字\nclass Student:\n # pass\n # 类属性\n hobby = '哈哈'\n\n # 实例方法,将self作为第一个参数的方法\n def say_hi(self):\n print('hi' + ' ' + self.name)\n print(self.id)\n\n def say_hello(self, usename='无名氏'):\n print('hello' + ' ' + usename)\n\n # 类方法:使用@classmethod,将cls作为第一个参数\n @classmethod\n def shoew(cls, msg): # cls表示当前的类\n print(msg, cls.hoppy)\n\n\n# 创建类的对象\nstu1 = Student()\nstu2 = Student()\nprint(type(stu1))\n\n# 为对象绑定属性\nstu1.name = 'tom' # 实例属性\nstu1.age = 20\nstu1.id = 2018\nstu2.name = 'tan'\nstu2.age = 22\nstu2.id = 2019\n\nprint(stu1.name, stu1.age)\n\n# 访问实例方法\nstu1.say_hi()\nstu2.say_hi()\nstu1.say_hello()\nprint('*' * 80)\n\n# 访问类属性\nprint(Student.hobby)\nstu1.hobby = 'enen'\nprint(stu1.hobby)\nprint(Student.hoppy)\n","sub_path":"py03_面向对象/01.定义.py","file_name":"01.定义.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"641456653","text":"import argparse\nimport os\nfrom utils import get_free_gpu\nfrom dataset import ProxyTaskDataset\nfrom torch.utils.data import DataLoader\n\nimport torch\n\nfrom i3d import I3D\nfrom odd_one_out import O3N\n\n\n\nparser = argparse.ArgumentParser(description='Self supervised learning script')\nparser.add_argument('--data', type=str, default='../datasets/UCF101_frames', metavar='D',\n help=\"folder where data is located.\")\nparser.add_argument('--video-list-directory', type=str, default='../datasets/ucfTrainTestlist', metavar='SD',\n help=\"directory where the video lists are stored\")\nparser.add_argument('--sampling', type=str, default='random', metavar='SA,',\n help=\"Sampling strategy (random, consecutive or constrained consecutive).\")\nparser.add_argument('--n_questions', type=int, default=6, metavar='Q,',\n help=\"Number of questions\")\nparser.add_argument('--n_samples', type=int, default=10, metavar='s,',\n help=\"Number of samples ie. frames\")\nparser.add_argument('--model', type=str, metavar='M',\n help=\"the model file to be evaluated. Usually it is of the form model_X.pth\")\n\nargs = parser.parse_args()\nuse_cuda = torch.cuda.is_available()\n\nvalidation_set = ProxyTaskDataset(root=args.data, video_info_path=os.path.join(args.video_list_directory, 'vallist1.txt'), sampling=args.sampling, n_samples=args.n_samples, n_questions=args.n_questions)\nval_loader = DataLoader(validation_set, batch_size=128, shuffle=False)\n\nstate_dict = torch.load(args.model)\ni3d = I3D(num_classes=400)\nmodel = O3N(i3d, n_questions=args.n_questions)\nmodel.load_state_dict(state_dict)\nfor param in model.parameters():\n if param.requires_grad:\n param.requires_grad = False\n\nif use_cuda:\n print('Using GPU')\n free_gpu_id = get_free_gpu()\n device = \"cuda:{}\".format(free_gpu_id)\n model.to(device)\nelse:\n print('Using CPU')\n\ndef validation():\n model.eval()\n validation_loss = 0\n correct = 0\n count = 0\n for data, target in val_loader:\n count += 1\n if use_cuda:\n data, target = data.to(device), target.to(device)\n output = model(data)\n # sum up batch loss\n criterion = torch.nn.CrossEntropyLoss(reduction='elementwise_mean')\n validation_loss += criterion(output, target).data.item()\n # get the index of the max log-probability\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n print(count)\n validation_loss /= len(val_loader.dataset)\n \n print('\\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n validation_loss, correct, len(val_loader.dataset),\n 100. * correct / len(val_loader.dataset)))\n return validation_loss\n\nvalidation()\n","sub_path":"src/evaluateO3N.py","file_name":"evaluateO3N.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"297473973","text":"\"\"\"\n求解《百钱百鸡》问题\n1只公鸡5元 1只母鸡3元 3只小鸡1元 用100元买100只鸡\n问公鸡 母鸡 小鸡各有多少只\n公鸡x只 母鸡y只 小鸡 100 - x - y只\n5x + 3y + z / 3 = 100\nx + y + z = 100\n\"\"\"\n# 公鸡最多买20只\nfor x in range(0, 20):\n # 母鸡最多买33只\n for y in range(0, 33):\n z = 100 - x - y\n if z >= 0 and 5 * x + 3 * y + z / 3 == 100:\n print(\"公鸡%d只、母鸡%d只、小鸡%d只\" % (x, y, z))\n","sub_path":"day-02/分支循环小练习/百鸡百钱.py","file_name":"百鸡百钱.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"617857299","text":"\"\"\"\nTitle: Deep Q-Learning for Atari Breakout\nAuthor: [Jacob Chapman](https://twitter.com/jacoblchapman) and [Mathias Lechner](https://twitter.com/MLech20)\nDate created: 2020/05/23\nLast modified: 2020/06/17\nDescription: Play Atari Breakout with a Deep Q-Network.\n\"\"\"\n\"\"\"\n## Introduction\nThis script shows an implementation of Deep Q-Learning on the\n`BreakoutNoFrameskip-v4` environment.\n### Deep Q-Learning\nAs an agent takes actions and moves through an environment, it learns to map\nthe observed state of the environment to an action. An agent will choose an action\nin a given state based on a \"Q-value\", which is a weighted reward based on the\nexpected highest long-term reward. A Q-Learning Agent learns to perform its\ntask such that the recommended action maximizes the potential future rewards.\nThis method is considered an \"Off-Policy\" method,\nmeaning its Q values are updated assuming that the best action was chosen, even\nif the best action was not chosen.\n### Atari Breakout\nIn this environment, a board moves along the bottom of the screen returning a ball that\nwill destroy blocks at the top of the screen.\nThe aim of the game is to remove all blocks and breakout of the\nlevel. The agent must learn to control the board by moving left and right, returning the\nball and removing all the blocks without the ball passing the board.\n### Note\nThe Deepmind paper trained for \"a total of 50 million frames (that is, around 38 days of\ngame experience in total)\". However this script will give good results at around 10\nmillion frames which are processed in less than 24 hours on a modern machine.\n### References\n- [Q-Learning](https://link.springer.com/content/pdf/10.1007/BF00992698.pdf)\n- [Deep Q-Learning](https://deepmind.com/research/publications/human-level-control-through-deep-reinforcement-learning)\n\"\"\"\n\"\"\"\n## Setup\n\"\"\"\nimport os\nimport sys\nimport time\n\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(sys.argv[1])\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport gym\n\n# Configuration paramaters for the whole setup\nseed = 42\ngamma = 1. # Discount factor for past rewards\nepsilon = tf.Variable(1.0, trainable=False) # Epsilon greedy parameter\nepsilon.assign(1.0)\nepsilon_min = 0.02 # Minimum epsilon greedy parameter\nepsilon_max = 1.0 # Maximum epsilon greedy parameter\nepsilon_interval = (\n epsilon_max - epsilon_min\n) # Rate at which to reduce chance of random action being taken\nbatch_size = 32 # Size of batch taken from replay buffer\n\nmax_steps_per_episode = 10000\nepsilon_greedy_decay_prop = 0.1 # proprtion of total timesteps over which epsilon is decayed\nexploration_proportion = 0.001 # proportion of total timesteps in which agent engages in pure exploration\ntotal_timesteps = 100000\nnum_actions = 2\ntimeout_steps = 200\ntrain = True\nneg_fall_reward = 5\nenv_string = \"CartPole-v0\"\nexp_name = \"100k_openai\"\nDDQN = True\n\n# Use the Baseline Atari environment because of Deepmind helper functions\nenv = gym.make(env_string)\n# Warp the frames, grey scale, stake four frame and scale to smaller ratio\n#env = wrap_deepmind(env, frame_stack=True, scale=True)\nenv.seed(seed)\n\n\"\"\"\n## Implement the Deep Q-Network\nThis network learns an approximation of the Q-table, which is a mapping between\nthe states and actions that an agent will take. 
For every state we'll have four\nactions, that can be taken. The environment provides the state, and the action\nis chosen by selecting the larger of the four Q-values predicted in the output layer.\n\"\"\"\n\n\ndef ortho_init(scale=1.0):\n def _ortho_init(shape, dtype, partition_info=None):\n #lasagne ortho init for tf\n shape = tuple(shape)\n if len(shape) == 2:\n flat_shape = shape\n elif len(shape) == 4: # assumes NHWC\n flat_shape = (np.prod(shape[:-1]), shape[-1])\n else:\n raise NotImplementedError\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v # pick the one with the correct shape\n q = q.reshape(shape)\n return (scale * q[:shape[0], :shape[1]]).astype(np.float32)\n return _ortho_init\n\ndef create_q_mlp(input_shape=2, num_layers=2, num_hidden=64, activation=tf.tanh, hidden_units=256, num_actions=2):\n x_input = tf.keras.Input(shape=input_shape)\n # h = tf.keras.layers.Flatten(x_input)\n h = x_input\n for i in range(num_layers):\n h = tf.keras.layers.Dense(units=num_hidden, kernel_initializer=ortho_init(np.sqrt(2)),\n name='mlp_fc{}'.format(i), activation=activation)(h)\n latent = tf.keras.layers.Flatten()(h)\n action_out = tf.keras.layers.Dense(units=hidden_units, activation=\"relu\")(latent)\n q_out = tf.keras.layers.Dense(units=num_actions, activation=None)(action_out)\n\n network = tf.keras.Model(inputs=[x_input], outputs=[q_out])\n return network\n\ndef simulate(env_string=\"CartPole-v1\", num_episodes=1000, exp_name=exp_name):\n model = create_q_mlp(input_shape=4, num_layers=2, num_hidden=64, num_actions=2)\n model_target = create_q_mlp(input_shape=4, num_layers=2, num_hidden=64, num_actions=2)\n env = gym.make(env_string)\n\n checkpoints_dir = os.path.join(\".\", \"checkpoints\", env_string, exp_name)\n checkpoint = tf.train.Checkpoint(model=model, model_target=model_target)\n manager = tf.train.CheckpointManager(checkpoint, checkpoints_dir, max_to_keep=40)\n\n if not manager.latest_checkpoint:\n print(\"No saved model\")\n return\n\n for episode in range(num_episodes):\n state = np.array(env.reset())\n done = False\n ep_reward = 0\n\n while not done:\n env.render()\n\n state_tensor = tf.convert_to_tensor(state)\n state_tensor = tf.expand_dims(state_tensor, 0)\n q_tensor = model_target.predict(state_tensor)\n\n # Take best action\n action = tf.argmax(q_tensor[0]).numpy()\n # Apply the sampled action in our environment\n state_next, reward, done, _ = env.step(action)\n state_next = np.array(state_next)\n ep_reward += reward\n\n state = state_next\n\nif train:\n # The first model makes the predictions for Q-values which are used to\n # make a action.\n model = create_q_mlp(input_shape=4, num_layers=2, num_hidden=64, num_actions=2)\n # Build a target model for the prediction of future rewards.\n # The weights of a target model get updated every 10000 steps thus when the\n # loss between the Q-values is calculated the target Q-value is stable.\n model_target = create_q_mlp(input_shape=4, num_layers=2, num_hidden=64, num_actions=2)\n\n\n \"\"\"\n ## Train\n \"\"\"\n # In the Deepmind paper they use RMSProp however then Adam optimizer\n # improves training time\n optimizer = keras.optimizers.Adam(learning_rate=1.e-3)\n\n # Experience replay buffers\n action_history = []\n state_history = []\n state_next_history = []\n rewards_history = []\n done_history = []\n pole_fall_history = []\n episode_reward_history = []\n running_reward = 0\n episode_count = 0\n total_timestep_count = tf.Variable(0, 
trainable=False)\n total_timestep_count.assign(0) # sugar for chekcpointing\n # Number of frames to take random action and observe output\n exploration_timesteps = int(exploration_proportion * total_timesteps)\n print(\"Number of pure exploration timesteps: {}\".format(exploration_timesteps))\n # Number of frames for exploration\n epsilon_greedy_timesteps = int(epsilon_greedy_decay_prop * total_timesteps)\n print(\"Number of timesteps over which epsilon will be decayed: {}\".format(epsilon_greedy_timesteps))\n # Maximum replay length\n # Note: The Deepmind paper suggests 1000000 however this causes memory issues\n max_memory_length = 50000\n # Train the model after 1 actions\n update_after_actions = 1\n # How often to update the target network\n update_target_network = 500\n # From OpenAI Baseline\n grad_norm_clipping = 10\n # Using huber loss for stability\n loss_function = keras.losses.Huber()\n\n if DDQN:\n arch = \"DDQN\"\n else:\n arch = \"DQN\"\n\n checkpoints_dir = os.path.join(\".\", \"checkpoints\", env_string, exp_name, arch)\n checkpoint = tf.train.Checkpoint(model=model, model_target=model_target,\n epsilon=epsilon, total_timestep_count=total_timestep_count)\n manager = tf.train.CheckpointManager(checkpoint, checkpoints_dir, max_to_keep=40)\n checkpoint_timesteps = 1000\n\n checkpoint.restore(manager.latest_checkpoint)\n epsilon = epsilon.assign_add(0.).numpy()\n total_timestep_count = total_timestep_count.assign_add(0).numpy()\n\n start_time = time.time()\n while total_timestep_count < total_timesteps:\n state = np.array(env.reset())\n episode_reward = 0\n\n done = False\n for timestep in range(1, max_steps_per_episode):\n #env.render() # Adding this line would show the attempts\n # of the agent in a pop up window.\n total_timestep_count += 1\n\n if total_timestep_count % checkpoint_timesteps == 0:\n # print(\"Time elapsed: {:.1f}. 
Total number of frames: {}, epsilon:{:.1f}\".format(time.time() - start_time,\n # total_timestep_count, epsilon))\n manager.save()\n\n # Use epsilon-greedy for exploration\n if total_timestep_count < exploration_timesteps or epsilon > np.random.rand(1)[0]:\n # Take random action\n action = np.random.choice(num_actions)\n else:\n # Predict action Q-values\n # From environment state\n state_tensor = tf.convert_to_tensor(state)\n state_tensor = tf.expand_dims(state_tensor, 0)\n q_values_rep_buf = model(state_tensor, training=False)\n # Take best action\n action = tf.argmax(q_values_rep_buf[0]).numpy()\n\n # Decay probability of taking random action\n epsilon -= epsilon_interval / epsilon_greedy_timesteps\n epsilon = max(epsilon, epsilon_min)\n\n # Apply the sampled action in our environment\n state_next, reward, done, _ = env.step(action)\n state_next = np.array(state_next)\n\n episode_reward += reward\n\n # Save actions and states in replay buffer\n action_history.append(action)\n state_history.append(state)\n state_next_history.append(state_next)\n done_history.append(done)\n rewards_history.append(reward)\n if done and timestep < timeout_steps:\n pole_fall_history.append(True)\n # print(done, True)\n else:\n pole_fall_history.append(False)\n # print(done, False)\n state = state_next\n\n # Update every fourth frame and once batch size is over 32\n if total_timestep_count % update_after_actions == 0 and len(done_history) > batch_size:\n #print(\"here\")\n\n # Get indices of samples for replay buffers\n indices = np.random.choice(range(len(done_history)), size=batch_size)\n\n # Using list comprehension to sample from replay buffer\n state_sample = np.array([state_history[i] for i in indices])\n state_next_sample = np.array([state_next_history[i] for i in indices])\n rewards_sample = [rewards_history[i] for i in indices]\n action_sample = [action_history[i] for i in indices]\n done_sample = tf.convert_to_tensor(\n [float(done_history[i]) for i in indices]\n )\n pole_fall_sample = tf.convert_to_tensor(\n [float(pole_fall_history[i]) for i in indices]\n )\n\n # Build the updated Q-values for the sampled future states\n # Use the target model for stability\n future_rewards_targ = model_target.predict(state_next_sample)\n # Q value = reward + discount factor * expected future reward\n\n if not DDQN:\n updated_q_values = rewards_sample + gamma * tf.reduce_max(\n future_rewards_targ, axis=1)\n else:\n future_rewards = model.predict(state_next_sample)\n future_rewards_argmax = tf.argmax(future_rewards, axis=1)\n future_rewards_argmax_onehot = tf.one_hot(future_rewards_argmax, num_actions)\n updated_q_values = rewards_sample + gamma * tf.reduce_sum(future_rewards_argmax_onehot * future_rewards_targ, axis=1)\n\n # If done set target to zero\n updated_q_values = updated_q_values * (1. 
- done_sample)\n\n                # Create a mask so we only calculate loss on the updated Q-values\n                masks = tf.one_hot(action_sample, num_actions)\n\n                with tf.GradientTape() as tape:\n                    # Train the model on the states and updated Q-values\n                    q_values = model(state_sample)\n\n                    # Apply the masks to the Q-values to get the Q-value for the action taken\n                    q_action = tf.reduce_sum(tf.multiply(q_values, masks), axis=1)\n                    # Calculate loss between new Q-value and old Q-value\n                    loss = loss_function(updated_q_values, q_action)\n\n                # Backpropagation with gradient clipping (grad_norm_clipping from above)\n                grads = tape.gradient(loss, model.trainable_variables)\n                clipped_grads = [tf.clip_by_norm(grad, grad_norm_clipping) for grad in grads]\n                optimizer.apply_gradients(zip(clipped_grads, model.trainable_variables))\n\n            if total_timestep_count % update_target_network == 0:\n                # update the target network with new weights\n                model_target.set_weights(model.get_weights())\n                # Log details\n                template = \"running reward: {:.2f} at episode {}, cumulative timesteps {}\"\n                print(template.format(running_reward, episode_count, total_timestep_count))\n\n            # Limit the state and reward history\n            if len(rewards_history) > max_memory_length:\n                del rewards_history[:1]\n                del state_history[:1]\n                del state_next_history[:1]\n                del action_history[:1]\n                del done_history[:1]\n\n            if done:\n                print(\"Time elapsed: {:.1f}. Cumulative timestep: {}. Epsilon: {:.4f}. Episode reward: {}\"\n                      .format(time.time() - start_time, total_timestep_count, epsilon, episode_reward))\n                print(\"model_layer_weights\", model.get_weights()[0][0][0])\n                if len(done_history) > batch_size:\n                    print(\"Average q_value: {:.1f}\".format(tf.reduce_mean(q_values).numpy()))\n                break\n\n        # Update running reward to check condition for solving\n        episode_reward_history.append(episode_reward)\n        if len(episode_reward_history) > 100:\n            del episode_reward_history[:1]\n        running_reward = np.mean(episode_reward_history)\n\n        episode_count += 1\n\nelse:\n    simulate()","sub_path":"keras_baseline_cartpole.py","file_name":"keras_baseline_cartpole.py","file_ext":"py","file_size_in_byte":15620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
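One detail worth flagging in the training step above: `updated_q_values * (1. - done_sample)` zeroes the whole target at terminal transitions, including the immediate reward. The textbook DQN target keeps the reward and masks only the bootstrapped term; a sketch using the same variable names as the script (whether the full zeroing is a deliberate choice for CartPole is not stated in the file):

```python
# Textbook DQN target: keep the terminal reward, mask only the bootstrap term.
max_future_q = tf.reduce_max(future_rewards_targ, axis=1)
updated_q_values = rewards_sample + gamma * max_future_q * (1.0 - done_sample)
```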
+{"seq_id":"321607442","text":"#reproducibility\n\nseed_value = 1\nimport os\nos.environ['PYTHONHASHSEED']=str(seed_value)\nimport random\nrandom.seed(seed_value)\nimport numpy as np\nnp.random.seed(seed_value)\nimport tensorflow\ntensorflow.compat.v1.set_random_seed(seed_value)\nfrom tensorflow.keras import backend as K\nsession_conf = tensorflow.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\nsess = tensorflow.compat.v1.Session(graph=tensorflow.compat.v1.get_default_graph(), config=session_conf)\ntensorflow.compat.v1.keras.backend.set_session(sess)\n\n#other imports\nimport parselmouth\nimport os\nimport librosa\nimport scipy.io.wavfile as wav\nfrom speechpy.feature import mfcc\nfrom keras.utils import np_utils\nfrom typing import Tuple\nimport numpy\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.model_selection import StratifiedKFold, LeaveOneOut, KFold\nimport numpy as np\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense, Dropout,Masking\nfrom tqdm import tqdm\nfrom tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint\nfrom tensorflow.keras.models import load_model\nfrom parselmouth.praat import call\nimport sys\nfrom spafe.features.lpc import lpcc\nimport gc\nimport time\n\n#redirect prints\norig_stdout = sys.stdout\nstdoutpath_f = r'C:\\Users\\mp95\\PycharmProjects\\Thesis\\logs\\no_it_10k\\fulldropouts\\LSTM512.txt'\nstdoutpath_l = r'C:\\Users\\mp95\\PycharmProjects\\Thesis\\logs\\no_it_10k\\lastdropouts\\LSTM512.txt'\nstdoutpath_n = r'C:\\Users\\mp95\\PycharmProjects\\Thesis\\logs\\no_it_10k\\nodropouts\\LSTM512.txt'\n#stdoutpath_featureanalysis =r'C:\\Users\\mp95\\PycharmProjects\\Thesis\\logs\\no_it_10k\\selected_feature_analysis\\LSTM256_128_mfccdeltasformantsintensitypitch.txt'\n#f = open(stdoutpath_featureanalysis, 'w')\n#sys.stdout = f\n\n#parameters\nmean_signal_length = 32000\ndata_path = r'F:\\EMOVO/'\nclass_labels = (\"Sad\", \"Happy\", \"Angry\", \"Neutral\")\n#parselmouth can be used only with full padding without altering original files\n#fp o sp, beware in sp only mfcc are functioning\n#\"mfcc\",\"deltas\",\"formants\",\"pitch\",\"intensity\"\nfeatures = (\"mfcc\",\"pitch\")\nsplits = 10\nsignal_mode = 'fp'\nspecial_value = 100\nroutine_it = 1\nepochs_n = 80\n\n\ndef get_feature_vector_from_formants(filepath,feature_vector):\n path = (filepath)\n sound = parselmouth.Sound(path) # read the sound\n pitch = sound.to_pitch_ac()\n meanF0 = call(pitch, \"Get mean\", 0, 0, \"Hertz\") # get mean pitch\n if meanF0 > 150:\n maxFormant = 5500 # women\n else:\n maxFormant = 5000 # men\n formant = call(sound, \"To Formant (burg)\", 0.0, 5, maxFormant, 0.025, 50) # even if i need 3 formants i calculate 5, it behaves better apparently\n local_formant = []\n for x in formant.xs():\n for f in range(1, 4):\n local_formant.append(formant.get_value_at_time(f, x))\n formant_array = np.reshape(local_formant, (formant.get_number_of_frames(), 3))\n #padding array to match first feature in features array length with special masking value\n if features.index('formants') is not 0:\n if formant_array.shape[0] < feature_vector.shape[0]:\n formant_array = np.pad(formant_array, ((0, feature_vector.shape[0] - formant_array.shape[0]), (0, 0)),\n 'constant', constant_values=(0, 100))\n elif formant_array.shape[0] > feature_vector.shape[0]:\n pad_len = formant_array.shape[0] - feature_vector.shape[0]\n pad_len //= 2\n formant_array = formant_array[pad_len:pad_len + 
feature_vector.shape[0]]\n #standardization\n for i in range(0, 3):\n formant_array[:, i] = formant_array[:, i] / np.linalg.norm(formant_array[:, i])\n if features.index('formants') is not 0:\n formant_array = np.concatenate((feature_vector, formant_array), axis=1)\n return formant_array\n\ndef get_feature_vector_from_pitch(filepath,feature_vector):\n path = (filepath)\n signal = parselmouth.Sound(path)\n pitch = signal.to_pitch_ac(time_step = 0.01,pitch_floor=150,very_accurate=True)\n #pitch = signal.to_pitch_ac()\n x_sample = pitch.selected_array['frequency']\n #x_sample = x_sample/np.linalg.norm(x_sample)\n if features.index('pitch') is not 0:\n if len(x_sample) < feature_vector.shape[0]:\n x_sample = np.pad(x_sample, ((0, feature_vector.shape[0] - len(x_sample))), 'constant', constant_values=100)\n elif len(x_sample) > feature_vector.shape[0]:\n pad_len = len(x_sample) - feature_vector.shape[0]\n pad_len //= 2\n x_sample = x_sample[pad_len:pad_len + feature_vector.shape[0]]\n x_sample = np.reshape(x_sample, (len(x_sample), 1))\n if features.index('pitch') is not 0:\n x_sample = np.concatenate((feature_vector,x_sample),axis=1)\n return x_sample\n\ndef get_feature_vector_from_intensity(filepath,feature_vector):\n path = (filepath)\n signal = parselmouth.Sound(path)\n x_intensity_local =[]\n intensity = signal.to_intensity(time_step=0.01, minimum_pitch=150)\n for x in intensity.xs():\n x_intensity_local.append(intensity.get_value(time=x))\n x_sample = np.array(x_intensity_local)\n x_sample = x_sample / np.linalg.norm(x_sample)\n if features.index('intensity') is not 0:\n if len(x_sample) < feature_vector.shape[0]:\n x_sample = np.pad(x_sample, ((0, feature_vector.shape[0] - len(x_sample))), 'constant', constant_values=100)\n if len(x_sample) > feature_vector.shape[0]:\n pad_len = len(x_sample) - feature_vector.shape[0]\n pad_len //= 2\n x_sample = x_sample[pad_len:pad_len + feature_vector.shape[0]]\n x_sample = np.reshape(x_sample, (len(x_sample), 1))\n if features.index('intensity') is not 0:\n x_sample = np.concatenate((feature_vector, x_sample), axis=1)\n return x_sample\n\ndef get_feature_vector_from_mfcc(signal,fs):\n #window 0.2 , stride 0.1\n mel_coefficients = mfcc(signal, fs, frame_stride=0.01,num_cepstral=13)\n mel_coefficients_for_deltas = mel_coefficients\n return mel_coefficients,mel_coefficients_for_deltas\n\ndef get_feature_vector_from_deltas(data):\n delta1 = librosa.feature.delta(data)\n concatenated=np.concatenate((data, delta1), axis=1)\n permutation = []\n for i in range(0, int(concatenated.shape[1] / 2)):\n permutation.append(i)\n permutation.append(13 + i)\n permutation = np.array(permutation)\n feature_vector = concatenated[:, permutation]\n return feature_vector\n\ndef get_feature_vector_from_lpcc(signal,fs,feature_vector):\n # compute lpccs\n x_sample = lpcc(sig=signal, fs=fs, win_len=0.02, win_hop=0.01, num_ceps=13, lifter=0, normalize=True)\n if features.index('lpcc') is not 0:\n if x_sample.shape[0] < feature_vector.shape[0]:\n x_sample = np.pad(x_sample, ((0, feature_vector.shape[0] - x_sample.shape[0]), (0, 0)),\n 'constant', constant_values=(0, 100))\n if x_sample.shape[0] > feature_vector.shape[0]:\n pad_len = x_sample.shape[0] - feature_vector.shape[0]\n pad_len //= 2\n x_sample = x_sample[pad_len:pad_len + feature_vector.shape[0]]\n if features.index('lpcc') is not 0:\n x_sample = np.concatenate((feature_vector, x_sample), axis=1)\n return x_sample\n\n\ndef padding(X):\n # Padding\n max_seq_len = 0\n for e in X:\n if e.shape[0] > max_seq_len:\n 
max_seq_len = e.shape[0]\n X_pad = np.full((len(X), max_seq_len, X[0].shape[1]),dtype=np.float32,fill_value=special_value)\n for s, x in enumerate(X):\n seq_len = x.shape[0]\n X_pad[s, 0:seq_len, :] = x\n return X_pad\n\ndef signal_slicing_padding(signal):\n s_len = len(signal)\n # pad the signals to have same size if lesser than required\n # else slice them\n if s_len < mean_signal_length:\n pad_len = mean_signal_length - s_len\n pad_rem = pad_len % 2\n pad_len //= 2\n signal = np.pad(signal, (pad_len, pad_len + pad_rem),\n 'constant', constant_values=100)\n else:\n pad_len = s_len - mean_signal_length\n pad_len //= 2\n signal = signal[pad_len:pad_len + mean_signal_length]\n return signal\n\n\ndef get_data(data_path: str,class_labels: Tuple) -> \\\n Tuple[np.ndarray, np.ndarray]:\n data = []\n labels = []\n names = []\n cur_dir = os.getcwd()\n sys.stderr.write('curdir: %s\\n' % cur_dir)\n os.chdir(data_path)\n for i, directory in enumerate(class_labels):\n sys.stderr.write(\"started reading folder %s\\n\" % directory)\n os.chdir(data_path+directory)\n for filename in tqdm(os.listdir()):\n filepath = os.getcwd() + '/' + filename\n fs, signal = wav.read(filepath)\n feature_vector=[]\n if signal_mode == 'sp':\n signal = signal_slicing_padding(signal)\n if 'mfcc' in features:\n if features.index('mfcc') is not 0:\n sys.exit(\"\\n###### please put mfcc as the first element of the feature array, aborting execution... ######\")\n else:\n feature_vector, mel_coefficients = get_feature_vector_from_mfcc(signal, fs)\n if 'lpcc' in features:\n feature_vector = get_feature_vector_from_lpcc(signal,fs,feature_vector)\n if 'deltas' in features:\n if 'mfcc' in features:\n feature_vector = get_feature_vector_from_deltas(mel_coefficients)\n else:\n sys.exit(\"\\n ###### can't compute deltas without mfcc, aborting execution ######\")\n if 'formants' in features:\n feature_vector = get_feature_vector_from_formants(filepath,feature_vector)\n if 'pitch' in features:\n feature_vector = get_feature_vector_from_pitch(filepath, feature_vector)\n if 'intensity' in features:\n feature_vector = get_feature_vector_from_intensity(filepath,feature_vector)\n data.append(feature_vector)\n labels.append(i)\n names.append(filename)\n names.append(filename)\n sys.stderr.write(\"ended reading folder %s\\n\" % directory)\n os.chdir('../..')\n os.chdir(cur_dir)\n return np.array(data), np.array(labels)\n\n\ndef predict(model, samples: numpy.ndarray) -> Tuple:\n results = []\n for _, sample in enumerate(samples):\n results.append(np.argmax(model.predict(np.array([sample]))))\n return tuple(results)\n\n\ndef evaluate(model, x_test: numpy.ndarray, y_test: numpy.ndarray) -> None:\n predictions = predict(model, x_test)\n print('Accuracy:%.3f\\n' % accuracy_score(y_pred=predictions,\n y_true=y_test))\n print('Confusion matrix:\\n', confusion_matrix(y_pred=predictions,\n y_true=y_test))\n\n\ndef train(x_train, y_train,x_test,y_test_train,model,acc,loss):\n #can't use early stopping with leave one out\n es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=1, patience=20)\n mc=ModelCheckpoint('best_epoch2.h5', monitor='val_accuracy', mode='max', save_best_only=True,verbose=0)\n history=model.fit(x_train, y_train, batch_size=32, epochs=epochs_n,callbacks=[es,mc],validation_data=(x_test,y_test_train),verbose=2)\n #retrieve from history best acc and relative loss from early stopping\n best_epoch = np.argmax(history.history['val_accuracy']) + 1\n acc.append(history.history['val_accuracy'][best_epoch-1])\n 
loss.append(history.history['val_loss'][best_epoch-1])\n print('best epoch:',best_epoch, ' loss:',loss[-1],' acc:',acc[-1])\n print(loss,acc)\n\n\ndef lstm():\n Multiple_it_acc_mean = []\n Multiple_it_loss_mean = []\n Multiple_it_acc_std = []\n Multiple_it_loss_std = []\n min_max_acc_diff = []\n counter = 1\n data, labels = get_data(data_path, class_labels=class_labels)\n\n if signal_mode == 'fp':\n data = padding(data)\n print(\"\\nEXECUTION PARAMETERS: {NUMBER OF FOLDERS: \",splits,\"}-{NUMBER OF EPOCHS: \",epochs_n,\"}-{NUMBER OF ROUTINE ITERATIONS: \",routine_it,\"}-{BATCH SIZE : \",32,\"}-{SIGNAL MODE: \",signal_mode,\"}-{AUGMENT:\",class_labels[0],\"}-{FEATURES: \",features,\"}-{EMOTIONS:\",class_labels,\"}\")\n\n for i in range(0,routine_it):\n start = time.time()\n K.clear_session()\n print(\"\\n####ITERATION NUMBER: \",i+1)\n it = 0\n # even if class are balanced we do not have many datapoints therefore i use stratifiedKFold\n kf = KFold(n_splits=splits, shuffle=True)\n #leave one out\n #cv = LeaveOneOut()\n acc = []\n loss = []\n for train_index, test_index in kf.split(data, labels):\n print('\\n#####FOLDER NUMBER: ' + str(it + 1))\n x_train, x_test = data[train_index], data[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n x_train = np.array(x_train)\n x_test = np.array(x_test)\n y_train = np.array(y_train)\n y_test = np.array(y_test)\n if it > 0:\n model.load_weights('model2.u5')\n y_train = np_utils.to_categorical(y_train)\n y_test_train = np_utils.to_categorical(y_test,num_classes=4) #specify num_classes=4 for leave one out\n if it == 0:\n print('Starting LSTM')\n model = Sequential()\n input_shape = x_train[0].shape\n model.add(Masking(mask_value=special_value, input_shape=(input_shape[0], input_shape[1])))\n model.add(LSTM(256, input_shape=(input_shape[0], input_shape[1]), return_sequences=True))\n model.add(Dropout(0.5))\n model.add(LSTM(128, return_sequences=False))\n #model.add(Dropout(0.5))\n # model.add(LSTM(64))\n # model.add(Dropout(0.5))\n #model.add(Dense(64, activation='tanh'))\n #model.add(Dropout(0.5))\n model.add(Dense(len(class_labels), activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n print(model.summary(), file=sys.stderr)\n model.save_weights('model2.u5')\n it += 1\n train(x_train, y_train, x_test, y_test_train, model, acc, loss)\n best_epoch = load_model('best_epoch2.h5')\n evaluate(best_epoch, x_test, y_test)\n print('\\n\\n ############# AVERAGE EVALUATIONS ############')\n print(\"\\n######### MEAN LOSS OVER THE \" + str(splits) + \" FOLDERS: \" + str(np.mean(loss)) + \" ###########\")\n print(\"######### MEAN ACCURACY OVER THE \" + str(splits) + \" FOLDERS: \" + str(np.mean(acc)) + \" ###########\")\n print(\n \"######### LOSS STANDARD DEVIATION OVER THE \" + str(splits) + \" FOLDERS: \" + str(np.std(loss)) + \" ###########\")\n print(\n \"######### ACC STANDARD DEVIATION OVER THE \" + str(splits) + \" FOLDERS: \" + str(np.std(acc)) + \" ###########\")\n Multiple_it_acc_mean.append(np.mean(acc))\n Multiple_it_loss_mean.append(np.mean(loss))\n Multiple_it_acc_std.append(np.std(acc))\n Multiple_it_loss_std.append(np.std(loss))\n min_max_acc_diff.append(np.max(acc) - np.min(acc))\n counter = counter + 1\n gc.collect()\n del model\n print('\\n\\n ############# FINAL AVERAGE EVALUATIONS FOR ITERATIONS ############')\n print(\"\\n#### MEAN OF LOSSES MEAN OVER \", counter, \" ITERATIONS: \", np.mean(Multiple_it_loss_mean),\n \" MEAN OF ACC MEAN OVER \", counter, \" 
ITERATIONS: \",\n np.mean(Multiple_it_acc_mean), \" #####\")\n print(\"####STD OF MEAN LOSS OVER \", counter - 1, \" ITERATIONS: \", np.std(Multiple_it_loss_mean),\n \" ##### STD OF ACC MEAN OVER \", counter - 1,\n \" ITERATIONS: \", np.std(Multiple_it_acc_mean), \" #####\")\n print(\"#### MEAN LOSSES STANDARD DEVIATIONS OVER \", counter, \" ITERATIONS: \", np.mean(Multiple_it_loss_std),\n \" MEAN ACC STANDARD DEVIATIONS OVER \", counter,\n \" ITERATIONS: \", np.mean(Multiple_it_acc_std), \" #####\")\n print(\"####STANDARD DEVIATION OF LOSS STANDARD DEVIATION OVER \", counter - 1, \" ITERATIONS: \",\n np.std(Multiple_it_loss_std),\n \" STANDARD DEVIATION OF ACC STANDARD DEVIATION OVER \", counter - 1, \" ITERATIONS:\",\n np.std(Multiple_it_acc_std), \"#####\")\n print(\"####AVERAGE MAX-MIN DIFFERENCE OVER \", counter - 1, \" ITERATIONS: \", np.mean(min_max_acc_diff), \" #####\")\n print(\"####STD.DEV MAX-MIN DIFFERENCE OVER \", counter - 1, \" ITERATIONS: \", np.std(min_max_acc_diff), \" #####\")\n end = time.time()\n print(\"\\n####### TIME ELAPSED: \", end - start, \" #######\")\n #sys.stdout = orig_stdout\n #f.close()\nlstm()\n","sub_path":"model/lstm_parselmouth2.py","file_name":"lstm_parselmouth2.py","file_ext":"py","file_size_in_byte":16827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"464944020","text":"import pygame\nfrom game_screen_5 import *\nfrom screen_s import *\n#from code import *\n\nX = 500\nY = 500\nfont_my = pygame.font.SysFont(\"Monospace\",15)\ndef help_screen():\n help_screen = pygame.display.set_mode((X,Y))\n running = 1\n while(running):\n for event in pygame.event.get():\n if(event.type == pygame.QUIT):\n pygame.quit()\n quit()\n elif(event.type == pygame.KEYDOWN):\n if(event.key == pygame.K_b):\n return\n pygame.display.update()\n help_screen.fill((0,0,0))\n rl = font_i.render(\"Rules\",1,(255,255,0))\n x1 = \"The player is given a grid with dots of\"\n y1 = \"different colors. \"\n x2 = \"The player has to join the dots of the same\"\n y2 = \"color in such a way that each of the lines dose\" \n y22 = \"not cross the other \" \n x3 = \"In the present game the controls are the\"\n y3 = \"keyboard for the menu.\" \n x4 = \"The player can connect the dots using the mouse.\"\n x5 = \"If the user uses the wrong direction the line\" \n y5 = \"wont'be shown which is a sign of an error.\" \n x6 = \"The player shouldn't leave any of the grids empty.\"\n rl1 = font_my.render(\"1. \"+str(x1),1,(255,0,0))\n rl11 = font_my.render(\" \"+str(y1),1,(255,0,0))\n rl2 = font_my.render(\"2. \"+str(x2),1,(0,0,255))\n rl22 = font_my.render(\" \"+str(y2),1,(0,0,255))\n rl222 = font_my.render(\" \"+str(y22),1,(0,0,255))\n rl3 = font_my.render(\"3. \"+str(x3),1,(0,255,0))\n rl33 = font_my.render(\" \"+str(y3),1,(0,255,0))\n rl4 = font_my.render(\"4. \"+str(x4),1,(255,128,0))\n rl5 = font_my.render(\"5. \"+str(x5),1,(0,128,255))\n rl55 = font_my.render(\" \"+str(y5),1,(0,128,255))\n rl6 = font_my.render(\"6. \"+str(x6),1,(255,127,0))\n help_screen.blit(rl,(230,80))\n help_screen.blit(rl6,(30,395))\n help_screen.blit(rl1,(30,175))\n help_screen.blit(rl11,(30,190))\n help_screen.blit(rl2,(30,225))\n help_screen.blit(rl22,(30,240))\n help_screen.blit(rl222,(30,255))\n help_screen.blit(rl3,(30,280))\n help_screen.blit(rl33,(30,295))\n help_screen.blit(rl4,(30,320))\n help_screen.blit(rl5,(30,350))\n help_screen.blit(rl55,(30,365))\n pygame.display.flip()\n","sub_path":"help_screen.py","file_name":"help_screen.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"388481319","text":"import CommonTips\r\nimport DumpPhotos\r\nimport OfflineSoRunner\r\nimport FallWithMask\r\nsub_model_name = 'Main Menu'\r\nop_info = '''\\n--------------Main Menu--------------\r\n| 【0】:Dump照片\r\n| 【1】:离线跑算法库\r\n| 【2】:带mask分析跌倒视频''' + CommonTips.tip_ops+'-------------------------------------\\n'\r\nprint(op_info)\r\n\r\nwhile True:\r\n cmd = input('('+sub_model_name+')'+CommonTips.tip_input_cmd)\r\n\r\n if cmd.isdigit():\r\n cmd = int(cmd)\r\n if cmd == 0:\r\n DumpPhotos.main()\r\n elif cmd == 1:\r\n OfflineSoRunner.main()\r\n elif cmd == 2:\r\n FallWithMask.main()\r\n else:\r\n print(CommonTips.tip_arg_error)\r\n else:\r\n if 'h' == cmd.lower():\r\n print(op_info)\r\n elif 'q' == cmd.lower():\r\n print(CommonTips.tip_quit)\r\n break\r\n else:\r\n print(CommonTips.tip_arg_error)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"330912730","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 27 16:05:22 2017\n\n@author: SAA\n\"\"\"\n\n\n# coding: utf-8\n\n# Deep Learning\n# =============\n# \n# Assignment 3\n# ------------\n# \n# Previously in `2_fullyconnected.ipynb`, you trained a logistic regression and a neural network model.\n# \n# The goal of this assignment is to explore regularization techniques.\n\n# In[ ]:\n\n\n# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\n\n\n# First reload the data we generated in `1_notmnist.ipynb`.\n\n# In[ ]:\n\n\npickle_file = 'notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)\n\n\n# Reformat into a shape that's more adapted to the models we're going to train:\n# - data as a flat matrix,\n# - labels as float 1-hot encodings.\n\n# In[ ]:\n\n\nimage_size = 28\nnum_labels = 10\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n # Map 1 to [0.0, 1.0, 0.0 ...], 2 to [0.0, 0.0, 1.0 ...]\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n\n\n\n# In[ ]:\n\ndef accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])\n\ntrain_subset = 10000\n\nbatch_size = 128\nhiddenlayer_num1 = 1024\nhiddenlayer_num2 = 300\nhiddenlayer_num3 = 50\n\nlam1 = .001/3\nlam2 = .001/3\nlam3 = .001/3\nlam4 = .001/3\n\ngraph = tf.Graph()\nwith graph.as_default():\n\n # Input data. 
For the training data, we use a placeholder that will be fed\n # at run time with a training minibatch.\n tf_train_dataset = tf.placeholder(tf.float32,\n shape=(batch_size, image_size * image_size))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Decay learning\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = 0.5\n end_learning_rate = 0.05\n decay_steps = 10000\n learning_rate = tf.train.polynomial_decay(starter_learning_rate, global_step,\n decay_steps, end_learning_rate,\n power=1)\n \n # Variables.\n weights1 = tf.Variable(\n tf.truncated_normal([image_size * image_size, hiddenlayer_num1],stddev=.12))\n weights2 = tf.Variable(\n tf.truncated_normal([hiddenlayer_num1, hiddenlayer_num2],stddev=.12))\n weights3 = tf.Variable(\n tf.truncated_normal([hiddenlayer_num2, hiddenlayer_num3],stddev=.12))\n weights4 = tf.Variable(\n tf.truncated_normal([hiddenlayer_num3, num_labels],stddev=.12))\n \n biases1 = tf.Variable(tf.ones([hiddenlayer_num1]))\n biases2 = tf.Variable(tf.ones([hiddenlayer_num2]))\n biases3 = tf.Variable(tf.ones([hiddenlayer_num3]))\n biases4 = tf.Variable(tf.ones([num_labels]))\n \n # Training computation.\n logits1 = tf.exp(-tf.square(tf.matmul(tf_train_dataset, weights1) + biases1))\n logits2 = tf.exp(-tf.square(tf.matmul(logits1, weights2) + biases2))\n logits3 = tf.exp(-tf.square(tf.matmul(logits2, weights3) + biases3))\n logits4 = tf.matmul(logits3, weights4) + biases4\n \n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits4))\n loss = loss + lam1 * tf.nn.l2_loss(weights1) + lam2 * tf.nn.l2_loss(weights2) + lam3 * tf.nn.l2_loss(weights3) + lam4 * tf.nn.l2_loss(weights4)\n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits4)\n valid_prediction = tf.nn.softmax(\n tf.matmul(tf.exp(-tf.square(tf.matmul(tf.exp(-tf.square(tf.matmul(tf.exp(-tf.square(tf.matmul(tf_valid_dataset, weights1) + biases1)), weights2) + biases2)), weights3) + biases3)),weights4)+biases4)\n test_prediction = tf.nn.softmax(tf.matmul(tf.exp(-tf.square(tf.matmul(tf.exp(-tf.square(tf.matmul(tf.exp(-tf.square(tf.matmul(tf_test_dataset, weights1) + biases1)), weights2) + biases2)), weights3) + biases3)),weights4)+biases4)\n#########################################################\nnum_steps = 10001\n\nwith tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print(\"Initialized\")\n for step in range(num_steps):\n # Pick an offset within the training data, which has been randomized.\n # Note: we could use better randomization across epochs.\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n # Generate a minibatch.\n batch_data = train_dataset[offset:(offset + batch_size), :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n # Prepare a dictionary telling the session where to feed the minibatch.\n # The key of the dictionary is the placeholder node of the graph to be fed,\n # and the value is the numpy array to feed to it.\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 500 == 0):\n print(\"Minibatch loss at step %d: %f\" % (step, l))\n 
print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n print(\"Validation accuracy: %.1f%%\" % accuracy(\n valid_prediction.eval(), valid_labels))\n print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))\n\n# ---\n","sub_path":"DL_Identifier.py","file_name":"DL_Identifier.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"394039446","text":"import json\nimport os\nfrom transaction import transaction\nfrom dbInstance import mongodb\nimport sys\n\n# This class will parase messge in string into json and do relevent analysis to it\nclass message(object):\n # init message in string and client as socket connection, transaction list as dictionary\n def __init__(self, message,client, translst):\n print('message:', message)\n self.__transferLst = translst\n self.__userlst = None\n self.__message = message\n self.__jsonpara = None\n self.__client = client\n # connect to database and take use of function in class dbInstance \n self.__dbinstance = mongodb('http://ec2-52-36-241-1.us-west-2.compute.amazonaws.com:5984/')\n # the format of message must be json\n try:\n self.__jsonpara = json.loads(message)\n print('message init in')\n except AttributeError:\n print('message type is not correctL ',type(message))\n # the json must include userid and type\n try:\n self.__user =('user'+ self.__jsonpara['UserId']).lower()\n self.__type = self.__jsonpara['Type']\n except:\n # lock userid and type ,then it is considered as admain model\n print('message user name error')\n self.__user = 'admain'\n self.__type = 'admain'\n self.__userInstance = None\n \n # Return the type of message \n def get_type(self):\n return self.__type\n\n # Return the parased json message \n def get_jsonpara(self):\n return self.__jsonpara\n \n # return the userid\n def get_user(self):\n return self.__user\n\n # return the user list\n def set_userlst(self, userlst):\n self.__userlst = userlst\n\n # return the user instance\n def set_userInstance(self, user):\n self.__userInstance = user\n\n # add http head to message which will be send to client\n def add_head(self,message):\n content = 'HTTP/1.x 200 ok\\r\\nContent-Type: text/html\\r\\n\\r\\n'\n #content += '
register successs!
'\n content += 'URL\\r\\n'\n content += message\n return content\n \n # Analysis the message according to it type\n def run(self):\n print('run in')\n # if user need to add new card to database\n if self.__type == 'CardRegister':\n cardNumber = self.__jsonpara['CardInfo']['CardNumber']\n holderName = self.__jsonpara['CardInfo']['HolderName']\n expireDate = self.__jsonpara['CardInfo']['ExpireDate']\n csv = self.__jsonpara['CardInfo']['CSV']\n self.__client.sendall(bytes(self.add_head(json.dumps({'Result':True})), encoding=\"utf8\"))\n self.__client.close()\n self.__dbinstance.createUser(self.__user)\n self.__dbinstance.userInfomationGenerate(self.__user,cardNumber,holderName,expireDate,csv)\n print('Saved')\n # if User need to get card information from database\n elif self.__type == 'GetCardInfo':\n print('getCardInfo')\n filename = os.getcwd()+ '/' + self.__user + '.json'\n print(self.__user)\n message = self.__dbinstance.get_card(self.__user)\n self.__client.sendall(bytes(self.add_head(json.dumps(message)), encoding=\"utf8\"))\n self.__client.close()\n # if user need to start a transaction\n elif self.__type == 'TransferStart':\n uuid = self.__jsonpara['Uuid']\n amount = self.__jsonpara['Amount']\n try:\n db = self.__dbinstance.get_couch()[self.__user]\n a = db.get('CardInfo')['DefaultCard']['balance']\n except:\n a = -1\n if int(a) < int(amount):\n self.__client.sendall(bytes(self.add_head(json.dumps({'Result':False})), encoding=\"utf8\"))\n self.__client.close()\n else:\n transinstance = transaction(self.__user,amount)\n if not uuid in self.__transferLst.get_lst().keys():\n self.__transferLst.get_lst()[uuid] = transinstance\n self.__client.send(bytes(self.add_head(json.dumps({'Result':True})), encoding=\"utf8\"))\n # transinstance.run()\n try: \n self.__dbinstance.del_amount(self.__user,amount)\n except:\n print (sys.exc_info())\n self.__client.close()\n print('statt sended')\n # transinstance.run()\n # if user need to end a transaction\n elif self.__type == 'TransferEnd':\n uuid = self.__jsonpara['Uuid']\n try:\n transinstance = self.__transferLst.get_lst()[uuid]\n amount = transinstance.get_amount()\n self.__client.send(bytes(self.add_head(json.dumps({'Result':True,'balance':'$'+amount+'.00'})), encoding=\"utf8\"))\n transinstance.set_userlst(self.__userlst)\n transinstance.set_credit(self.__user)\n try:\n self.__dbinstance.add_amount(self.__user,amount)\n except:\n print (sys.exc_info())\n del self.__transferLst.get_lst()[uuid]\n self.__client.close()\n except:\n self.__client.sendall(bytes(self.add_head(json.dumps({'Result':False})), encoding=\"utf8\"))\n self.__client.close()\n","sub_path":"server_end/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
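The exception pair caught around `json.loads` above covers everything that call can raise: `TypeError` for non-string input and `json.JSONDecodeError`, a `ValueError` subclass, for malformed text. A self-contained sketch of the same guard (`parse_json_object` is a name introduced here, not part of message.py):

```python
import json

def parse_json_object(raw):
    """Return the payload as a dict, or None if it is not a JSON object."""
    try:
        parsed = json.loads(raw)
    except (TypeError, ValueError):  # ValueError covers json.JSONDecodeError
        return None
    return parsed if isinstance(parsed, dict) else None

assert parse_json_object('{"UserId": "42", "Type": "GetCardInfo"}') == {"UserId": "42", "Type": "GetCardInfo"}
assert parse_json_object('not json') is None
assert parse_json_object(None) is None
```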
+{"seq_id":"210297702","text":"# This model simulates the interaction of Person agents, and the\r\n# transmission of disease between connected agents. Loosely based on \r\n# the SIR model (susceptible, infectious, recovered).\r\n# \r\n# Infected agents may infect those in their network that are not resistant,\r\n# and those who recover may become resistant to future infection (immune).\r\n#\r\n# We also introduced the concept of 'diagnosis' which may indicate \r\n# the disease being discovered. Once an infection is diagnosed, \r\n# there is a possibility of recovery, and a lower probability of infecting \r\n# others.\r\n#\r\n# Probabilities of events happening (infection, resistance, diagnosis, etc.)\r\n# can be tweaked (built upon) to model factors such as immunisation, better \r\n# access to diagnosis, disease strains/evolution, outbreak management, etc.\r\n# \r\n# In the current implemenetation, we're not aiming for a realistic model, \r\n# but rather a model than can test features in Papaya, namely:\r\n# * Extra task parallelism based on analysis of data dependencies\r\n# * Branching in states\r\n# * heterogenous interation topologies (Euclidean Space + Network)\r\n# * Scalar + Array datatypes in agent memory\r\n# * Environment constants\r\n# * Visualisation + (maybe) computational steering\r\n# * (in future) topology-aware message boards\r\n\r\nfrom papaya.core.agent import Agent, StateTransition, StateBranch\r\nfrom papaya.core.datatype import Variable, Array\r\n\r\nclass Person(Agent):\r\n id = Variable(\"int\", unique=True, constant=True)\r\n x = Variable(\"float\")\r\n y = Variable(\"float\")\r\n infected = Variable(\"bool\", default=False)\r\n diagnosed = Variable(\"bool\", default=False)\r\n resistant = Variable(\"bool\", default=False)\r\n connections = Array(\"int\", max_size=10)\r\n fx_crowd = Variable(\"float\", loop_temp=True, default=0.0)\r\n fy_crowd = Variable(\"float\", loop_temp=True, default=0.0)\r\n fx_connections = Variable(\"float\", loop_temp=True, default=0.0)\r\n fy_connections = Variable(\"float\", loop_temp=True, default=0.0)\r\n \r\n state_transitions = [\r\n (\"START\", \"output_location\", \"DISEASE_TRANSMISSION_START\"),\r\n (\"DISEASE_TRANSMISSION_START\", \"is_infectious\",\r\n [ # branching of states\r\n (\"transmit_disease\", \"POST_SPREAD\", \"infectious\"),\r\n (\"update_resistance\", \"DISEASE_TRANSMISSION_END\", \"resistant\"),\r\n (\"update_infection_status\", \"POSSIBLY_INFECTED\", \"susceptible\"),\r\n ]),\r\n (\"POST_SPREAD\", \"diagnosis_and_recovery\", \"DISEASE_TRANSMISSION_END\"),\r\n (\"POSSIBLY_INFECTED\", \"diagnosis\", \"DISEASE_TRANSMISSION_END\"),\r\n (\"DISEASE_TRANSMISSION_END\", \"calculate_crowd_forces\", \"CROWD_FORCES_CALCULATED\"),\r\n (\"CROWD_FORCES_CALCULATED\", \"calculates_connection_forces\", \"CONNECTION_FORCES_CALCULATED\"),\r\n (\"CONNECTION_FORCES_CALCULATED\", \"update_connections\", \"CONNECTIONS_UPDATED\"),\r\n (\"CONNECTIONS_UPDATED\", \"move\", \"MOVED\"),\r\n ]\r\n \r\n class output_location(StateTransition):\r\n \"\"\"Posts id, x, y\"\"\"\r\n reads = [\"id\", \"x\", \"y\"]\r\n msg_posts = [\"Location\"]\r\n\r\n class is_infectious(StateBranch):\r\n reads = [\"infected\", \"resistant\"]\r\n def next_state(self):\r\n if self.get_mem(\"infected\"):\r\n return \"POST_SPREAD\"\r\n elif self.get_mem(\"resistant\"):\r\n return \"DISEASE_TRANSMISSION_END\"\r\n else:\r\n return \"POSSIBLY_INFECTED\"\r\n \r\n # if infected \r\n class transmit_disease(StateTransition): \r\n \"\"\" \r\n If infected, transmit disease.\r\n 
PROB_INFECT: probability that a connection gets infected\r\n        PROB_INFECT_IF_DIAGNOSED: lower infection probability if diagnosed\r\n        \"\"\"\r\n        reads = [\"infected\", \"diagnosed\", \"connections\", \"id\"]\r\n        msg_posts = [\"DiseaseInfection\"]\r\n\r\n    class diagnosis_and_recovery(StateTransition):\r\n        \"\"\"\r\n        PROB_DIAGNOSED: probability that an infection is diagnosed\r\n        PROB_RECOVERY: if diagnosed, probability of recovery from disease\r\n        PROB_RESISTANCE: once cured, the probability of becoming resistant\r\n\r\n        Note: 'diagnosed' must be reset to False once recovered.\r\n        If recovery fails, there's still a chance in the next iteration.\r\n        \"\"\"\r\n        reads = [\"diagnosed\"]\r\n        writes = [\"infected\", \"diagnosed\", \"resistant\"]\r\n\r\n    # if resistant\r\n    class update_resistance(StateTransition):\r\n        \"\"\"\r\n        PROB_LOOSE_RESISTANCE: probability of losing resistance\r\n        \"\"\"\r\n        reads = [\"resistant\"]\r\n        writes = [\"resistant\"]\r\n\r\n    # if susceptible (!infected && !resistant)\r\n    class update_infection_status(StateTransition):\r\n        \"\"\"Checks messages to see if the agent gets infected\"\"\"\r\n        writes = [\"infected\"]\r\n        msg_reads = [\"DiseaseInfection\"]\r\n\r\n    class diagnosis(StateTransition):\r\n        \"\"\"\r\n        PROB_DIAGNOSED: probability that an infection is diagnosed\r\n        \"\"\"\r\n        reads = [\"infected\"]\r\n        writes = [\"diagnosed\"]\r\n\r\n    class calculate_crowd_forces(StateTransition):\r\n        \"\"\"\r\n        Calculates forces based on agents within INFLUENCE_RADIUS\r\n        \"\"\"\r\n        reads = [\"x\", \"y\"]\r\n        writes = [\"fx_crowd\", \"fy_crowd\"]\r\n        msg_reads = [\"Location\"]\r\n\r\n    class calculates_connection_forces(StateTransition):\r\n        \"\"\"\r\n        Calculates forces based on connected agents\r\n        \"\"\"\r\n        reads = [\"x\", \"y\", \"connections\"]\r\n        writes = [\"fx_connections\", \"fy_connections\"]\r\n        msg_reads = [\"Location\"]\r\n\r\n    class update_connections(StateTransition):\r\n        \"\"\"\r\n        PROB_UNFRIEND: probability of a connection being dropped\r\n        PROB_FRIEND: probability of an agent within INFLUENCE_RADIUS\r\n            being added as a connection\r\n        MAX_CONNECTIONS: maximum number of connections\r\n        \"\"\"\r\n        reads = [\"x\", \"y\", \"connections\"]\r\n        writes = [\"connections\"]\r\n        msg_reads = [\"Location\"]\r\n\r\n    class move(StateTransition):\r\n        \"\"\"\r\n        Change location based on various forces\r\n        \"\"\"\r\n        reads = [\"x\", \"y\", \"fx_crowd\", \"fy_crowd\",\r\n                 \"fx_connections\", \"fy_connections\"]\r\n        writes = [\"x\", \"y\"]\r\n","sub_path":"examples/sim_infection/infection/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"447746295","text":"from unidecode import unidecode\nfrom datetime import datetime\nimport locale\nimport re\nimport pandas as pd\n\nservices = open(\"services/services.txt\", \"r\").read().split(\"\\n\")\nservices = [s.strip() for s in services if s]\n\n\ndef lydia(df):\n def find_price(x):\n res1 = re.findall(r\"VOUS A RÉGLÉ \\d+,\\d+ €\", x)\n res2 = re.findall(r\"\\d+,\\d+ € PAY\", x)\n res3 = re.findall(r\"VOUS AVEZ RÉGLÉ \\d+,\\d+ €\", x)\n if len(res1) == 1:\n return float(re.findall(r\"\\d+,\\d+\", res1[0])[0].replace(',', '.'))\n elif len(res2) == 1:\n return - float(re.findall(r\"\\d+,\\d+\", res2[0])[0].replace(',', '.'))\n elif len(res2) == 1:\n return - float(re.findall(r\"\\d+,\\d+\", res3[0])[0].replace(',', '.'))\n else:\n return None\n\n mails_lydia = df.loc[df.cat == 'lydia'][['date', 'body']]\n\n if mails_lydia.shape[0] == 0:\n return []\n\n mails_lydia['amount'] = mails_lydia.body.apply(find_price)\n mails_lydia = mails_lydia.loc[mails_lydia.amount.isna() == False]\n\n return mails_lydia[['date', 'amount']].to_dict('records')\n # {'date' : list(mails_lydia['date'].values),\n # 'transaction' : list(mails_lydia['transaction'].values)\n # }\n\n\ntry:\n # if error here set it to fr_FR or check locale -a\n locale.setlocale(locale.LC_TIME, \"fr_FR.utf8\")\nexcept:\n # if error here set it to fr_FR or check locale -a\n locale.setlocale(locale.LC_TIME, \"fr_FR\")\n\n\ndef doctolib(df):\n mails_doc = df.loc[df.cat == 'doctolib']\n\n mails_doc = mails_doc.loc[mails_doc.snippet.str.contains(\"confirmé\")]\n\n res = []\n\n for date_mail, el in mails_doc.loc[:, ['date', 'body']].itertuples(index=False):\n date = datetime.fromtimestamp(int(date_mail)/1000)\n tmp = {}\n\n try:\n name = re.findall(r'RDV confirmé \\n \\n(.*?) \\n', el)[0]\n except:\n name = None\n tmp['name'] = name\n\n try:\n spe = re.findall(name + ' \\n(.*?) \\n', el)[0]\n except:\n spe = None\n tmp['spe'] = spe\n\n try:\n date = re.findall(spe + ' \\n(.*?) \\n', el)[0]\n date = datetime.strptime(date, '%A %d %B a %Hh%M')\n date = date.replace(year=date_mail.year)\n tmp['date'] = str(int(date.timestamp()*1000))\n except:\n date = None\n tmp['date'] = None\n\n try:\n address = re.findall(\n r\"Accès & informations \\n \\n(.*?) \\n \\nOBTENIR L'ITINERAIRE\", el, re.DOTALL)[0]\n address = ''.join(address.split('\\n'))\n except:\n address = None\n tmp['address'] = address\n\n res += [tmp]\n return res\n\n\ndef amazon(df):\n # restriction to amazon emails\n df_amazon = df.loc[df.cat == 'amazon']\n # get all amazon orders\n df_amazon = df_amazon.loc[df_amazon.body.str.contains(\"Votre Expédition \")]\n\n res = []\n\n for el in df.body:\n tmp = {}\n # total cost parsing\n try:\n amount = re.findall(\n r'Montant total pour cet envoi : EUR \\d{0,100000},\\d\\d', el)[0]\n amount = re.findall(r'\\d{0,100000},\\d\\d', amount)[0]\n # replacing the , with . if necessary\n amount = amount.replace(\",\", \".\")\n tmp['amount'] = float(amount)\n except:\n continue\n # payment tool parsing\n try:\n tmp['payment_tool'] = re.findall(r'Payé par (.*?): ', el)[0]\n except:\n tmp['payment_tool'] = ''\n # delivering date parsing\n try:\n date = re.findall(r'Livraison : \\n (.*?) \\n \\n', el, re.DOTALL)[0]\n date = datetime.strptime(date, '%A %d %B %Y')\n\n except:\n date = None\n\n # converting to unix millisecs\n tmp['date'] = str(date.timestamp()*1000)\n\n # parsing products\n products = re.findall(r'\\(Vendu par (.*?) 
\\n \\n', el, re.DOTALL)\n\n # if no product pass\n if products == []:\n continue\n\n tmp['articles'] = []\n\n for prod in products:\n res_prod = {}\n res_prod['seller'] = prod.split(') :')[0]\n\n prod = prod.split('\\n ')[1:]\n\n res_prod['article'] = prod[0]\n price = re.findall(r'\\d{0,100000},\\d\\d', prod[1])[0]\n # replacing the , with . if necessary\n price = price.replace(\",\", \".\")\n res_prod['price'] = float(price)\n\n tmp['articles'] += [res_prod]\n res += [tmp]\n return res\n\n\ndef uber_rides(df):\n df_uber = df.loc[df.cat == 'uber']\n\n res = []\n\n for date, el in df_uber.loc[:, ['date', 'body']].itertuples(index=False):\n date = datetime.fromtimestamp(int(date)/1000)\n tmp = {}\n # place\n try:\n departure, destination = re.findall(\n r'\\d{1,2}:\\d{1,2} (.*?) \\d{1,2}:\\d{1,2} (.*?) Invitez', el)[0]\n tmp['departure'] = departure\n tmp['destination'] = destination\n except:\n continue\n\n # price\n try:\n price = re.findall(r' Total: \\d{1,1000},\\d{2,1000} € ', el)[0]\n price = re.findall(r'\\d{1,1000},\\d{2,1000}', price)[0]\n tmp['price'] = price\n except:\n price = None\n tmp['price'] = price\n pass\n\n # distance\n try:\n distance = re.findall(r'\\d{1,1000}.{0,1}\\d{0,1000} km', el)[0]\n tmp['distance'] = distance\n except:\n distance = None\n tmp['distance'] = distance\n pass\n\n # date\n try:\n horaire = re.findall(r'\\d{1,2}:\\d\\d', el)\n start = datetime.strptime(horaire[0], '%H:%M')\n end = datetime.strptime(horaire[1], '%H:%M')\n start = start.replace(\n year=date.year, month=date.month, day=date.day)\n end = end.replace(year=date.year, month=date.month, day=date.day)\n tmp['start'] = str(int(start.timestamp()*1000))\n tmp['end'] = str(int(end.timestamp()*1000))\n except:\n start = None\n end = None\n tmp['start'] = start\n tmp['end'] = end\n pass\n\n res += [tmp]\n\n return res\n\n\ndef uber_jump(df):\n df_uber = df.loc[df.cat == 'uber']\n\n df_uber = df_uber.loc[df_uber.body.str.contains(\"vélos électriques\")]\n\n res = []\n\n for date, el in df_uber.loc[:, ['date', 'body']].itertuples(index=False):\n date = datetime.fromtimestamp(int(date)/1000)\n tmp = {}\n # place\n try:\n departure, destination = re.findall(\n r'\\d{1,2}:\\d\\d (.*?) \\d{1,2}:\\d\\d (.*?) 
contacter l\\'assistance', el)[0]\n tmp['departure'] = departure\n tmp['destination'] = destination\n except:\n continue\n\n # price\n try:\n price = re.findall(r' Total: \\d{1,1000},\\d{2,1000} € ', el)[0]\n price = re.findall(r'\\d{1,1000},\\d{2,1000}', price)[0]\n tmp['price'] = price\n except:\n price = None\n tmp['price'] = price\n pass\n\n # distance\n try:\n distance = re.findall(\n r'\\d{1,1000}.{0,1}\\d{0,1000} kilomètres', el)[0]\n tmp['distance'] = distance\n except:\n distance = None\n tmp['distance'] = distance\n pass\n\n # date\n try:\n horaire = re.findall(r'\\d{1,2}:\\d\\d', el)\n start = datetime.strptime(horaire[0], '%H:%M')\n end = datetime.strptime(horaire[1], '%H:%M')\n start = start.replace(\n year=date.year, month=date.month, day=date.day)\n end = end.replace(year=date.year, month=date.month, day=date.day)\n tmp['start'] = str(int(start.timestamp() * 1000))\n tmp['end'] = str(int(end.timestamp() * 1000))\n except:\n start = None\n end = None\n tmp['start'] = start\n tmp['end'] = end\n pass\n\n res += [tmp]\n\n return res\n\n\ndef uber_eats(df):\n df_uber = df.loc[:1, :]\n\n res = []\n for date, el in df_uber.loc[:, ['date', 'body']].itertuples(index=False):\n tmp = {}\n\n # price\n try:\n price = re.findall(r'Total: \\d{1,1000},\\d{2,1000} € ', el)[0]\n price = re.findall(r'\\d{1,1000},\\d{2,1000}', price)[0]\n tmp['price'] = price\n except:\n price = None\n tmp['price'] = price\n pass\n\n # restaurant\n try:\n restaurant = re.findall(r'commandé chez (.*?)\\.', el)[0]\n tmp['restaurant'] = restaurant\n except:\n tmp['restaurant'] = None\n pass\n\n # articles\n try:\n cmd = re.findall(r'Total (.*?) Montant facturé ', el, re.DOTALL)[0]\n articles = re.findall(\n r'\\d{1,3} \\n(.*?) \\n\\d{1,1000},\\d\\d €', cmd, re.DOTALL)[:-1]\n tmp['articles'] = articles\n except:\n tmp['articles'] = None\n pass\n\n # date\n try:\n date = re.findall(\n r'Total: \\d{1,1000},\\d{2,1000} € \\n(.*?) 
\\n', el)[0]\n date = datetime.strptime(date, '%a, %b %d, %Y')\n date = str(date.timestamp()*1000)\n tmp['date'] = date\n except:\n tmp['date'] = None\n pass\n\n res += [tmp]\n\n return res\n\n\ndef tag_mail(header):\n \"\"\"\n tag_mail will tag if the mail is in the list of structured emails\n\n input:\n header : is the header of the email\n output:\n name of the category, or 'no_cat' if non were find\n\n usage example: df['cat'] = df.headers.apply(tag_mail)\n \"\"\"\n comp_list = ['linkedin', 'facebook', 'lydia', 'spotify', 'sncf', 'twitter', 'tinder', 'deezer', 'itunes',\n 'apple', 'google', 'uber', 'ubereats', 'doctolib', 'instagram', 'amazon']\n\n if header == []:\n return None\n for k in header:\n if k['name'] == 'From':\n L_cat = [el * (el in k['value']) for el in comp_list]\n L_cat = list(filter(lambda x: x != '', L_cat))\n if L_cat == []:\n return None\n elif len(L_cat) > 2:\n print(L_cat)\n return L[cat[0]]\n elif len(L_cat) == 1:\n return L_cat[0]\n else:\n return None\n\n\ndef format_mail(text):\n text = text.lower()\n text = unidecode(text)\n return text\n\n\ndef clean_and_tokenize(text):\n \"\"\"\n Cleaning a document with:\n - Lowercase\n - Removing numbers with regular expressions\n - Removing punctuation with regular expressions\n - Removing other artifacts\n And separate the document into words by simply splitting at spaces\n Params:\n text (string): a sentence or a document\n Returns:\n tokens (list of strings): the list of tokens (word units) forming the document\n \"\"\"\n # Lowercase\n text = text.lower()\n # Remove numbers\n text = re.sub(r\"[0-9]+\", \"\", text)\n # Remove punctuation\n REMOVE_PUNCT = re.compile(\"[.;:!\\'?,\\\"()\\[\\]]\")\n text = REMOVE_PUNCT.sub(\"\", text)\n # Remove small words (1 and 2 characters)\n text = re.sub(r\"\\b\\w{1,2}\\b\", \"\", text)\n # Remove HTML artifacts specific to the corpus we're going to work with\n REPLACE_HTML = re.compile(\"(
)|(\\-)|(\\/)\")\n text = REPLACE_HTML.sub(\" \", text)\n\n tokens = text.split()\n return tokens\n\n\n# Call each function referenced in the services.txt file\n\n\n# def extract_services(df):\n# res = {}\n# for s in services:\n# s = s.strip()\n# res[s] = globals()[s](df)\n# return res\n\n\nservice_to_redis_mapping = {\n \"lydia\": \"toDisplay.lydia\",\n \"doctolib\": \"toDisplay.doctolib\",\n \"amazon\": \"toDisplay.amazon\",\n \"uber_rides\": \"toDisplay.uberRides\",\n \"uber_jump\": \"toDisplay.uberBikes\",\n \"uber_eats\": \"toDisplay.uberEats\"\n}\n\n\ndef extract_services(df):\n res = {}\n for service in services:\n service = service.strip()\n redis_field = service_to_redis_mapping[service]\n yield service, redis_field, globals()[service](df)\n return\n","sub_path":"server/services/utils/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":12229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
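A quick smoke test for the `lydia()` parser above, assuming a DataFrame with the `cat`/`date`/`body` columns used throughout this module; the two synthetic bodies contain the French phrases the regexes look for:

import pandas as pd

df = pd.DataFrame({
    "cat": ["lydia", "lydia"],
    "date": ["1609459200000", "1609545600000"],  # unix ms, as elsewhere in this file
    "body": ["UNTEL VOUS A RÉGLÉ 12,50 €", "20,00 € PAYÉ À UNTEL"],
})
print(lydia(df))
# [{'date': '1609459200000', 'amount': 12.5}, {'date': '1609545600000', 'amount': -20.0}]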
+{"seq_id":"159729348","text":"from rest_framework import status as s\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.request import Request\nfrom rest_framework.views import APIView\n\nfrom icebook.apps.posts.models import Post\nfrom icebook.apps.posts.serializers import CommentSerializer\n\n\nclass CommentView(APIView):\n \"\"\"List all Posts.\"\"\"\n\n authentication_classes = [SessionAuthentication]\n permission_classes = [IsAuthenticated]\n\n @staticmethod\n def get(request: Request) -> Response:\n post_id: str = request.query_params.get(\"post_id\")\n\n if not post_id:\n error_dict: dict = {\"Error\": \"post_id query parameter not given!\"}\n return Response(error_dict, status=s.HTTP_400_BAD_REQUEST)\n\n post = Post.objects.prefetch_related(\"comment_set__user__profile\").get(\n id=int(post_id)\n )\n\n post_comments = post.comment_set.all()\n\n serializer = CommentSerializer(\n post_comments, many=True, context={\"request\": request}\n )\n\n return Response(serializer.data)\n\n @staticmethod\n def post(request: Request) -> Response:\n data = request.data.copy()\n data[\"post\"] = int(data[\"post\"])\n serializer = CommentSerializer(data=data, context={\"request\": request})\n\n if serializer.is_valid():\n serializer.save(user=request.user)\n return Response(serializer.data, status=s.HTTP_201_CREATED)\n\n return Response(serializer.errors)\n","sub_path":"icebook/apps/posts/api/views/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"471565403","text":"import Tkinter as tk\nfrom PIL import Image, ImageTk\nfrom sys import platform as SYS_PLATFORM\n\ndef escape(x):\n res = \"\"\n for i in x:\n if i == \"\\\\\": # If it equals a single backslash.\n res += \"\\\\\\\\\" # Add two backslashes\n else:\n res += i # Add whatever the character was\n return res\n\nif \"windows\" in SYS_PLATFORM.lower():\n file_delimeter = \"\\\\\"\nelse:\n file_delimeter = \"/\"\n\nclass ExpandImage(tk.Frame):\n def __init__(self, parent, picturePath):\n tk.Frame.__init__(self, parent)\n self.parent = parent\n self.height = self.winfo_reqheight()\n self.width = self.winfo_reqwidth()\n self.parent.title(\"Climatewatch Data Toolbox - \"+self.stripFileName(picturePath))\n \n self.picturePath = picturePath\n self.original = Image.open(escape(self.picturePath))\n self.aspectRatio = float(self.original.size[0])/float(self.original.size[1])\n photo = ImageTk.PhotoImage(self.original)\n self.image = tk.Label(self, image=photo, bg=\"#000000\")\n self.image.photo = photo\n self.image.pack(fill = tk.BOTH, expand = True)\n self.pack(fill = tk.BOTH, expand = True)\n \n self.image.bind(\"\", self.updateDimens)\n \n def updateDimens(self, event):\n self.parent.geometry(str(event.width)+\"x\"+str(event.height))\n if self.aspectRatio < float(event.width)/float(event.height):\n photo = ImageTk.PhotoImage(self.original.resize((int(event.height*self.aspectRatio), event.height)))\n else:\n photo = ImageTk.PhotoImage(self.original.resize((event.width, int(event.width/self.aspectRatio))))\n self.image.config(image=photo)\n self.image.photo = photo\n \n def stripFileName(self, fileName):\n temp = \"\"\n for i in reversed(fileName):\n if i == file_delimeter:\n break\n else:\n temp = i + temp\n return temp\n \n def __del__(self):\n self.image.destroy()\n","sub_path":"Version_4/version_4_1/ExpandImage.py","file_name":"ExpandImage.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"296320673","text":"from Bio import Entrez\n\n\nclass NCBI_Organism:\n \"\"\"\n Organism object, issued from the GenBank databas\n \"\"\"\n\n def __init__(self, id=None):\n self.id = id\n self.taxonomy = None\n self.lineage = None\n self.species = None\n self.family = None\n self.genus = None\n self.order = None\n self.subclass = None\n self.classe = None\n self.phylum = None\n\n self._set_taxonomy()\n self._set_lineage()\n if self.taxonomy is not None and len(self.taxonomy) > 0:\n self._set_properties()\n\n def _set_taxonomy(self):\n \"\"\"Set the attribute taxonomy\"\"\"\n if self.id is not None:\n get_taxonomy = Entrez.efetch(db=\"Taxonomy\", id=self.id, retmode=\"xml\")\n self.taxonomy = Entrez.read(get_taxonomy)\n\n def _set_lineage(self):\n \"\"\"Set the attribute lineage\"\"\"\n if self.taxonomy is not None:\n self.lineage = self.taxonomy[0][\"LineageEx\"]\n\n def _set_properties(self):\n \"\"\"Set all the variables with the values\"\"\"\n self.species = self.taxonomy[0][\"ScientificName\"]\n genus = next((item for item in self.lineage if item[\"Rank\"] == \"genus\"), None)\n self.genus = genus[\"ScientificName\"] if genus is not None else None\n family = next((item for item in self.lineage if item[\"Rank\"] == \"family\"), None)\n self.family = family[\"ScientificName\"] if family is not None else None\n order = next((item for item in self.lineage if item[\"Rank\"] == \"order\"), None)\n self.order = order[\"ScientificName\"] if order is not None else None\n subclass = next((item for item in self.lineage if item[\"Rank\"] == \"subclass\"), None)\n self.subclass = subclass[\"ScientificName\"] if subclass is not None else None\n classe = next((item for item in self.lineage if item[\"Rank\"] == \"class\"), None)\n self.classe = classe[\"ScientificName\"] if classe is not None else None\n phylum = next((item for item in self.lineage if item[\"Rank\"] == \"phylum\"), None)\n self.phylum = phylum[\"ScientificName\"] if phylum is not None else None\n","sub_path":"objects/NCBI_Organism.py","file_name":"NCBI_Organism.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"3314645","text":"from flask import request\nfrom flask_restful import Resource\nfrom marshmallow import ValidationError\nfrom sqlalchemy.exc import IntegrityError\n\nfrom app.main.models import Animal\nfrom ..schemas import AnimalSchema as Schema\nfrom ... import db\n\n\nclass AnimalResource(Resource):\n\n def get(self, id):\n animal = Animal.query.get(id)\n if animal is None:\n return \"Animal not found\", 404\n else:\n return Schema.PUT().dump(animal)\n\n def put(self, id):\n animal = Animal.query.get(id)\n if animal is None:\n return \"Animal not found\", 404\n else:\n try:\n data = request.get_json()\n schema = Schema.PUT()\n new_animal = schema.load(data)\n except ValidationError as err:\n return err.messages, 400\n try:\n db.session.delete(animal)\n db.session.add(new_animal)\n db.session.commit()\n except IntegrityError as err:\n db.session.rollback()\n return f\"ID {new_animal.id} already exists.\", 400\n return \"put-ed\", 200\n\n def patch(self, id):\n animal = Animal.query.get(id)\n if animal is None:\n return \"Animal not found\", 404\n else:\n try:\n data = request.get_json()\n schema = Schema.PATCH()\n new_animal = schema.load(data)\n except ValidationError as err:\n return err.messages, 400\n for key, value in new_animal.items():\n setattr(animal, key, value)\n try:\n db.session.add(animal)\n db.session.commit()\n except IntegrityError as err:\n db.session.rollback()\n return f\"ID {new_animal['id']} already exists.\", 400\n return \"patched\", 200\n\n def delete(self, id):\n animal = Animal.query.get(id)\n if animal is None:\n return \"Animal not found\", 404\n else:\n db.session.delete(animal)\n db.session.commit()\n return \"deleted\", 200\n","sub_path":"app/web_api/resources/animal.py","file_name":"animal.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"26886184","text":"import math\n\ngrade = 0\npoints = 0\nstudent = input()\n\nwhile student != \"midnight\":\n points = 0\n for i in range(0,6):\n problem_points = int(input())\n points += problem_points\n if points < 0:\n print(\"{st_name} was cheating!\".format(st_name=student))\n break\n\n points = (points / 600) * 100\n grade = math.floor(points) * 0.06\n print(\"points {:.2f}, grade {:.2f}\".format(points, grade))\n\n if grade >= 5:\n print(\"\"\"\n \"===================\"\n \"| CERTIFICATE |\"\n \"| {grade:.2f}/6.00 |\"\n \"===================\"\n \"Issued to {st_name}\"\n \"\"\".format(\n grade=grade,\n st_name=student\n ))\n elif 0 < grade < 5:\n print(\"{} - {:.2f}\".format(student, grade))\n else:\n grade = -1\n\n student = input()","sub_path":"Python/Basics/zad6.py","file_name":"zad6.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"566212880","text":"from kgx import PandasTransformer\nfrom kgx import ObanRdfTransformer\nimport networkx as nx\nfrom random import random\nimport logging\n\ndef test_load():\n \"\"\"\n create a random graph and save it in different formats\n \"\"\"\n G = nx.MultiDiGraph()\n\n # Adjust this to test for scalability\n N = 1000\n E = N * 3\n for i in range(1,N):\n G.add_node(curie(i), label=\"node {}\".format(i))\n for i in range(1,E):\n s = random_curie(N)\n o = random_curie(N)\n G.add_edge(o,s, predicate='related_to', relation='related_to')\n print('Nodes={}'.format(len(G.nodes())))\n rename_all(G)\n\n w = PandasTransformer(G)\n w.save(\"target/random\")\n\n w = ObanRdfTransformer(G)\n w.save(\"target/random.ttl\")\n\ndef rename_all(G):\n m = {}\n for nid in G.nodes():\n tid = nid.replace(\"X:\",\"FOO:\")\n m[nid] = tid\n print(\"Renaming...\")\n nx.relabel_nodes(G,m, copy=False)\n print(\"Renamed!\")\n\n\ndef random_curie(N):\n return curie(int(random()*N))\n\ndef curie(n):\n return \"X:{}\".format(n)\n","sub_path":"tests/test_random.py","file_name":"test_random.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"407052477","text":"class Person:\n \n number = 0 \n\n @classmethod\n def issue_id(cls):\n\n cls.number += 1\n return cls.number\n\n @classmethod\n def return_population(cls):\n\n return cls.number\n\n def __init__(self, name):\n\n self._name = name\n self._id = Person.issue_id()\n\ndef main():\n\n print(f\"Current population: {Person.return_population()}\")\n person_1 = Person(\"Alex\")\n person_2 = Person(\"John\")\n print(f\"person_1 id: {person_1._id}\")\n print(f\"person_2 id: {person_2._id}\")\n print(f\"Current population: {Person.return_population()}\")\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"sample_code/class_method.py","file_name":"class_method.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"40957456","text":"from __future__ import print_function\nimport argparse \n\nfrom scapy.all import * \n\nclass DHCPClient: \n\tdef __init__(self):\n\t\tconf.checkIPaddr = False\n\t\t# normally Scapy makes sure that replies come from the same IP address the stimulus was sent to, however our DHCP packet was set to the IP broadcast address and any answer packet will have the IP address of the replying DHCP server as its source IP address\n\t\t# tl;dr the IP addresses wont match\n\n\t\tself.dhcp_discover = None\n\t\tself.dhcp_offer = None\n\t\tself.dhcp_request = None\n\t\tself.dhcp_ack = None\n\t\t# other DHCP packets here; decline, and inform\n\t\tself.dhcp_release = None\n\n\t\tself.iface = conf.iface\n\t\tself.srcMAC = get_if_raw_hwaddr(conf.iface)[1]\n\t\tself.destMAC = 'ff:ff:ff:ff:ff:ff' #broadcast MAC\n\t\tself.srcIPAddr = '0.0.0.0'\n\t\tself.destIPAddr = '255.255.255.255' #limited broadcast\n\t\tself.srcPort = 68\n\t\tself.destPort = 67\n\n\t# for testing and diagnostic purposes \n\tdef show(self, packet):\n\t\tpacket.show2() \n\t\treturn;\n\n\t#https://www.omnisecu.com/ccna-security/dhcp-starvation-attacks-and-dhcp-spoofing-attacks.php\n\tdef detectDHCPStarvation(self):\n\t\tdef callback(pkt):\n\t\t\tif DHCP in pkt and pkt[DHCP].options[0][1] == 1:\n\t\t\t\tprint('DHCP DISCOVER \\n %s MAC: %s' % (pkt[IP].src, pkt[Ether].src))\n\n\t\tpkt = sniff(prn=callback, store=0)\n\n\t#https://en.wikipedia.org/wiki/Rogue_DHCP\n\t#https://gist.github.com/joselitosn/a8a7b842037f9357fd56\n\tdef serverCount(self): \n\t\tdef callback(pkt):\n\t\t\tif DHCP in pkt and pkt[DHCP].options[0][1] == 2:\n\t\t\t\tprint('DHCP OFFER \\nIP: %s MAC: %s' % (pkt[IP].src, pkt[Ether].src))\n\n\t\tself.dhcp_discover = Ether(dst=self.destMAC)/IP(src=self.srcIPAddr,dst=self.destIPAddr)/UDP(sport=self.srcPort,dport=self.destPort)/BOOTP(chaddr=self.srcMAC)/DHCP(options=[(\"message-type\",\"discover\"),\"end\"])\n\t\tsendp(self.dhcp_discover)\n\t\tpkt = sniff(prn=callback, store=0)\n\n\tdef discover(self):\n\t\t#did not provide xid for this one\n\t\tself.dhcp_discover = Ether(dst=self.destMAC)/IP(src=self.srcIPAddr,dst=self.destIPAddr)/UDP(sport=self.srcPort,dport=self.destPort)/BOOTP(chaddr=self.srcMAC)/DHCP(options=[(\"message-type\",\"discover\"),\"end\"])\n\n\t\t#contains it's MAC address\n\t\t#destined for UDP port number 68\n\t\t#key values, message direction, sorce MAC address, destination MAC address, source IPv4 address; destination IPv4 address, source port number, destination port number\n\t\t\n\t\tprint(\"SENT: Discover Packet\")\n\t\tself.show(self.dhcp_discover)\n\n\t\t#srp1 only returns the first answer\n\t\tself.dhcp_offer = srp1(self.dhcp_discover, iface=self.iface, timeout=10)\n\n\t\tif self.dhcp_offer == None:\n\t\t\tself.dhcp_offer = -1\n\t\t\tprint(\"timeout\")\n\t\t\treturn \n\t\telse: \n\t\t\tprint(\"RECEIVED: Offer Packet\")\n\t\t\tself.show(self.dhcp_offer)\n\n\t\t#this is the DHCP offer\n\t\t#the message contains offered TCP/IP configuration, like IPv4 address and subnet mask, accept the first one that arrives\n\t\t#key values; message direction, source MAC address, destination MAC address, source IPv4 address, destination IPv4 address, source port number, destination port number\n\n\tdef request(self):\n\n\t\tmy_ip = self.dhcp_offer[BOOTP].yiaddr\n\t\tserver_ip = self.dhcp_offer[BOOTP].siaddr\n\n\t\tself.dhcp_request = Ether(src=self.srcMAC, 
dst=self.destMAC)/IP(src=self.srcIPAddr,dst=self.destIPAddr)/UDP(sport=self.srcPort,dport=self.destPort)/BOOTP(chaddr=self.srcMAC)/DHCP(options=[(\"message-type\",\"request\"),(\"server_id\",server_ip),(\"requested_addr\", my_ip),\"end\"])\n\n\t\tprint(\"SENT: Request Packet\")\n\t\tself.show(self.dhcp_request)\n\n\t\t#identify DHCPNak message to restart the process\n\t\t#constructs a DHCPACK datagram \n\t\t#includes IP address and subnet mask for the DHCP client, may include other TCP/IP configuration, etc\n\t\tself.dhcp_ack = srp1(self.dhcp_request, iface=self.iface, timeout=10)\n\n\t\tif self.dhcp_ack == None:\n\t\t\tself.dhcp_ack = -1 \n\t\t\tprint(\"timeout\")\n\t\t\treturn\n\t\telse: \n\t\t\tprint(\"RECEIVED: ACK Packet\")\n\t\t\tself.show(self.dhcp_ack) \n\n\t\t#accept first offer recieved by broadcasting a DHCP request message for the offered IPv4 address\n\t\t#contains the IP address of the server that issued the offer and the MAC address of the DHCP client\n\t\t#requests the selected DHCP server to assign the DHCP client an IP address and other TCP/IP configuration values \n\t\t#also notifies that all other DHCP servers that there offers were not accepted by the DHCP client \n\t\n\tdef release(self):\n\t\t#at the end of the test, send a DHCPRelease packet to release the IP address and cancel the remaining lease\n\t\tmy_ip = self.dhcp_offer[BOOTP].yiaddr\n\t\tserver_ip = self.dhcp_offer[BOOTP].siaddr\n\n\t\tself.dhcp_release = Ether(dst=self.destMAC)/IP(src=self.srcIPAddr,dst=self.destIPAddr)/UDP(sport=self.srcPort,dport=self.destPort)/BOOTP(chaddr=self.srcMAC)/DHCP(options=[(\"message-type\",\"release\"),(\"server_id\",server_ip),(\"requested_addr\", my_ip),\"end\"])\n\n\t\t#self.show(self.dhcp_release)\n\n\t\tsend(self.dhcp_release, iface=self.iface)\n\n\tdef run(self): \n\t\tself.discover()\n\t\tself.request()\n\t\tself.release() \n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(\n\tdescription=\"Show information relating to DHCP\",\n\tformatter_class=argparse.RawDescriptionHelpFormatter)\n\tparser.add_argument(\"-d\", \"--detect\", help=\"detect if a DHCP starvation attack is occuring\", action=\"store_true\", default=False) \n\tparser.add_argument(\"-s\", \"--scan\", help=\"identify all of the DNS servers contained within the network\", action=\"store_true\", default=False) \n\n\targs = parser.parse_args()\n\n\tclient = DHCPClient()\n\n\tif args.scan:\n\t\tclient.serverCount()\n\telif args.detect:\n\t\tclient.detectDHCPStarvation()\n\telse: \n\t\tclient.run()","sub_path":"dhcp_client.py","file_name":"dhcp_client.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
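For reference when reading the sniff callbacks above: the `pkt[DHCP].options[0][1]` values being compared (1 for discover, 2 for offer) are the DHCP message-type codes from RFC 2132:

# DHCP option 53 ("message-type") values, per RFC 2132
DHCP_MESSAGE_TYPES = {
    1: "DISCOVER",
    2: "OFFER",
    3: "REQUEST",
    4: "DECLINE",
    5: "ACK",
    6: "NAK",
    7: "RELEASE",
    8: "INFORM",
}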
+{"seq_id":"296966986","text":"lst = [1, 2, 3, 4, 6, 7, 13, 18, 25, 26, 19]\nlst = sorted(lst)\n\n\ndef get_ranges(l):\n start = l[0]\n stop = l[0]\n line = \"\"\n\n for x in l[1:]:\n if stop + 1 == x:\n stop = x\n else:\n if stop == start:\n line += f'{stop}, '\n else:\n line += f'{start}-{stop}, '\n start = x\n stop = x\n if stop == start:\n line += f'{stop}'\n else:\n line += f'{start}-{stop}'\n\n print(line)\n\n\nget_ranges(lst)\n\n\ndef fizzbuzz(i):\n while i <= 50:\n if i % 5 == 0 and i % 3 == 0:\n print(\"FizzBuzz!\")\n elif i % 3 == 0:\n print(\"Fizz!\")\n elif i % 5 == 0:\n print(\"Buzz!\")\n else:\n print(i)\n i += 1\n\n\nfizzbuzz(10)\n","sub_path":"Tasks/Brizgalov_Stanislav_Tasks/Homework/tasks4.py","file_name":"tasks4.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"386003437","text":"# Copyright 2018 Iguazio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport traceback\nfrom subprocess import PIPE, run\nfrom sys import executable, stderr\n\nimport mlrun\nfrom tests.conftest import examples_path, out_path, tests_root_directory\n\n\ndef exec_main(op, args):\n cmd = [executable, \"-m\", \"mlrun\", op]\n if args:\n cmd += args\n out = run(cmd, stdout=PIPE, stderr=PIPE, cwd=examples_path)\n if out.returncode != 0:\n print(out.stderr.decode(\"utf-8\"), file=stderr)\n print(out.stdout.decode(\"utf-8\"), file=stderr)\n print(traceback.format_exc())\n raise Exception(out.stderr.decode(\"utf-8\"))\n return out.stdout.decode(\"utf-8\")\n\n\ndef exec_run(cmd, args, test):\n args = args + [\"--name\", test, \"--dump\", cmd]\n return exec_main(\"run\", args)\n\n\ndef compose_param_list(params: dict, flag=\"-p\"):\n composed_params = []\n for k, v in params.items():\n composed_params += [flag, f\"{k}={v}\"]\n return composed_params\n\n\ndef test_main_run_basic():\n out = exec_run(\n f\"{examples_path}/training.py\",\n compose_param_list(dict(p1=5, p2='\"aaa\"')),\n \"test_main_run_basic\",\n )\n print(out)\n assert out.find(\"state: completed\") != -1, out\n\n\ndef test_main_run_hyper():\n out = exec_run(\n f\"{examples_path}/training.py\",\n compose_param_list(dict(p2=[4, 5, 6]), \"-x\"),\n \"test_main_run_hyper\",\n )\n print(out)\n assert out.find(\"state: completed\") != -1, out\n assert out.find(\"iterations:\") != -1, out\n\n\ndef test_main_run_noctx():\n out = exec_run(\n f\"{tests_root_directory}/no_ctx.py\",\n [\"--mode\", \"noctx\"] + compose_param_list(dict(p1=5, p2='\"aaa\"')),\n \"test_main_run_noctx\",\n )\n print(out)\n assert out.find(\"state: completed\") != -1, out\n\n\ndef test_main_run_archive():\n args = f\"--source {examples_path}/archive.zip --handler handler\"\n out = exec_run(\"./myfunc.py\", args.split(), \"test_main_run_archive\")\n assert out.find(\"state: completed\") != -1, out\n\n\ndef test_main_local_source():\n args = f\"--source {examples_path} --handler my_func\"\n out = exec_run(\"./handler.py\", args.split(), \"test_main_local_source\")\n print(out)\n assert out.find(\"state: completed\") != -1, out\n\n\ndef test_main_run_archive_subdir():\n runtime = '{\"spec\":{\"pythonpath\":\"./subdir\"}}'\n args = f\"--source {examples_path}/archive.zip -r {runtime}\"\n out = exec_run(\"./subdir/func2.py\", args.split(), \"test_main_run_archive_subdir\")\n print(out)\n assert out.find(\"state: completed\") != -1, out\n\n\ndef test_main_local_flag():\n fn = mlrun.code_to_function(\n filename=f\"{examples_path}/handler.py\", kind=\"job\", handler=\"my_func\"\n )\n yaml_path = f\"{out_path}/myfunc.yaml\"\n fn.export(yaml_path)\n args = f\"-f {yaml_path} --local\"\n out = exec_run(\"\", args.split(), \"test_main_local_flag\")\n print(out)\n assert out.find(\"state: completed\") != -1, 
out\n","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"380647705","text":"f=open(\"leapfrog_ch_.txt\")\nline=f.readline()\nt= int(line.strip())\nff=open(\"output.out\",\"w+\")\nfor i in range(1,t+1):\n arr=f.readline().strip()\n dot=0\n B=0\n n=len(arr)\n j=0\n while jB:\n if B<2:\n ans=\"N\"\n elif dot==0 and n!=1:\n ans=\"N\"\n ff.write(\"Case #\"+str(i)+\": \"+ans+\"\\n\")\nff.close()\nf.close()","sub_path":"hackercup facbook/2019/qualification round/Leapfrog Ch 2.py","file_name":"Leapfrog Ch 2.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"384473731","text":"import sublime, sublime_plugin\nimport subprocess, os, re\n\nclass InsertTextCommand(sublime_plugin.TextCommand):\n def run(self, edit, txt):\n self.view.insert(edit, self.view.size(), txt);\n\nclass ShowTypecheckerCommand(sublime_plugin.WindowCommand):\n def run(self):\n self.unmarkAll()\n self.output_view = self.window.get_output_panel(\"textarea\")\n self.window.run_command(\"hide_panel\", {\"panel\": \"output.textarea\"})\n if not checkFileType(self.window.active_view()):\n return\n self.output_view.set_read_only(False)\n typechecker_output = self.getOutput().decode('utf-8')\n self.output_view.run_command('insert_text', {\"txt\": typechecker_output})\n self.output_view.set_read_only(True)\n if typechecker_output != \"\":\n self.window.run_command(\"show_panel\", {\"panel\": \"output.textarea\"})\n self.markErrorLines(typechecker_output)\n\n def getOutput(self):\n settings = self.window.active_view().settings()\n directory = os.path.dirname(self.window.active_view().file_name())\n ssh = settings.get(\"hack_ssh_enable\")\n address = settings.get(\"hack_ssh_address\")\n folder = settings.get(\"hack_ssh_folder\")\n if (ssh and folder != None and address != None):\n ret = subprocess.Popen(\n [\n \"ssh\", address, \"cd \" + folder + \"; hh_client\"\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n else:\n ret = subprocess.Popen(\n [\n which('hh_client'),\n '--from', 'sublime'\n # ^ doesn't do anything for hh_client yet\n ],\n cwd=directory,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n output = ret.communicate()[0]\n if ret.returncode == 0: # No Errors\n return \"\"\n return output\n\n def markErrorLines(self, output):\n views = {}\n regions = {}\n errors = output.split(\"\\n\")\n status = \"\"\n\n for err in errors:\n result = re.search('^(.*):(\\d*):(\\d*),(\\d*): (.*)$', err)\n if result is None:\n continue\n filename, line_number, char_start, char_end, err_msg = result.groups()\n status += line_number + \": \" + err_msg + \" \"\n\n filename = os.path.basename(filename)\n view = self.window.find_open_file(filename)\n if view is None:\n continue\n\n offset = view.text_point(int(line_number) - 1, 0)\n region = sublime.Region(\n offset - 1 + int(char_start),\n offset + int(char_end)\n )\n views[view.id()] = view\n\n if not regions.get(view.id()):\n regions[view.id()] = []\n regions[view.id()].append(region)\n\n for view in views:\n self.markError(views[view], regions[view])\n self.window.active_view().set_status('hacklang', status)\n\n def markError(self, view, regions):\n view.add_regions(\n 'error', regions, 'invalid', 'circle'\n )\n\n def unmarkAll(self):\n for view in self.window.views():\n view.erase_regions('error')\n view.erase_status('hacklang')\n\n\n\nclass onSaveListener(sublime_plugin.EventListener):\n\tdef on_post_save(self, view):\n\t\tview.window().run_command(\"show_typechecker\")\n\nclass CompletionsListener(sublime_plugin.EventListener):\n\n def on_query_completions(self, view, prefix, locations):\n view.settings()\n ssh = settings.get(\"hack_ssh_enable\")\n if ssh or not checkFileType(view):\n return [()] # default\n directory = os.path.dirname(view.file_name())\n startregion = sublime.Region(0, locations[0])\n endregion = sublime.Region(locations[0], view.size());\n contents = view.substr(startregion)+\"AUTO332\"+view.substr(endregion)\n proc = subprocess.Popen(\n [\n which('hh_client'), \"--auto-complete\"\n ],\n cwd=directory,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n stdout = 
proc.communicate(contents.encode('utf-8'))\n if proc.returncode == 0:\n results = self.format(stdout)\n return results\n\n # hh_client returns a tuple\n # = (\"newline seperated list\", None) for what I see\n def format(self, input):\n entries = results = []\n for entry in input[0].decode('utf-8').split(\"\\n\"):\n if not entry:\n continue\n space = entry.find(' ')\n if entry[0] == '$': # Variable\n name_end = len(entry) if (space == -1) else space\n word = entry[1:name_end]\n results.append((entry, word));\n elif space < 0: # Class or function\n results.append((entry, entry))\n else: # Method, property or constant\n word = entry[:space]\n results.append((entry, word))\n return results\n\n\n\ndef checkFileType(view):\n tag = view.substr(sublime.Region(0, 2))\n return tag == ''\n\ndef which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n pathenv = os.getenv(\"PATH\") + os.pathsep + \"/usr/local/bin/\"\n for path in pathenv.split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n raise Exception(\"hh_client executable not found\")\n","sub_path":"hack.py","file_name":"hack.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"119989791","text":"# Author: CZ\n# Time: 2019-07-10 12:03\n\nimport sys\nimport time\nimport random\nimport hashlib\nimport requests\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QWidget, QApplication, QGridLayout, QLabel, QLineEdit, QPushButton\n\n\nclass youdao():\n\tdef __init__(self):\n\t\tself.headers = {\n\t\t\t\t\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',\n\t\t\t\t\t\t'Referer': 'http://fanyi.youdao.com/',\n\t\t\t\t\t\t'Cookie': 'OUTFOX_SEARCH_USER_ID=-481680322@10.169.0.83;'\n\t\t\t\t\t}\n\t\tself.data = {\n\t\t\t\t\t\t'i': None,\n\t\t\t\t\t\t'from': 'AUTO',\n\t\t\t\t\t\t'to': 'AUTO',\n\t\t\t\t\t\t'smartresult': 'dict',\n\t\t\t\t\t\t'client': 'fanyideskweb',\n\t\t\t\t\t\t'salt': None,\n\t\t\t\t\t\t'sign': None,\n\t\t\t\t\t\t'ts': None,\n\t\t\t\t\t\t'bv': None,\n\t\t\t\t\t\t'doctype': 'json',\n\t\t\t\t\t\t'version': '2.1',\n\t\t\t\t\t\t'keyfrom': 'fanyi.web',\n\t\t\t\t\t\t'action': 'FY_BY_REALTlME'\n\t\t\t\t\t}\n\t\tself.url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'\n\n\tdef translate(self, word):\n\t\tts = str(int(time.time()*10000))\n\t\tsalt = str(int(time.time()*10000) + random.random()*10 + 10)\n\t\tsign = 'fanyideskweb' + word + salt + '97_3(jkMYg@T[KZQmqjTK'\n\t\tsign = hashlib.md5(sign.encode('utf-8')).hexdigest()\n\t\tbv = '5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n\t\tbv = hashlib.md5(bv.encode('utf-8')).hexdigest()\n\t\tself.data['i'] = word\n\t\tself.data['salt'] = salt\n\t\tself.data['sign'] = sign\n\t\tself.data['ts'] = ts\n\t\tself.data['bv'] = bv\n\t\tres = requests.post(self.url, headers=self.headers, data=self.data)\n\t\treturn [res.json()['translateResult'][0][0].get('tgt')]\n\n\nclass Demo(QWidget):\n\tdef __init__(self, parent=None):\n\t\tsuper().__init__()\n\t\tself.setWindowTitle('翻译软件-公众号: Charles的皮卡丘')\n\t\tself.setWindowIcon(QIcon('data/icon.jpg'))\n\t\tself.Label1 = QLabel('原文')\n\t\tself.Label2 = QLabel('译文')\n\t\tself.LineEdit1 = QLineEdit()\n\t\tself.LineEdit2 = QLineEdit()\n\t\tself.translateButton2 = QPushButton()\n\t\tself.translateButton2.setText('有道翻译')\n\t\tself.grid = QGridLayout()\n\t\tself.grid.setSpacing(12)\n\t\tself.grid.addWidget(self.Label1, 1, 0)\n\t\tself.grid.addWidget(self.LineEdit1, 1, 1)\n\t\tself.grid.addWidget(self.Label2, 2, 0)\n\t\tself.grid.addWidget(self.LineEdit2, 2, 1)\n\t\tself.grid.addWidget(self.translateButton2, 2, 2)\n\t\tself.setLayout(self.grid)\n\t\tself.resize(400, 150)\n\t\tself.translateButton2.clicked.connect(lambda: self.translate())\n\t\tself.yd_translate = youdao()\n\n\tdef translate(self):\n\t\tword = self.LineEdit1.text()\n\t\tif not word:\n\t\t\treturn\n\t\telse:\n\t\t\tresults = self.yd_translate.translate(word)\n\t\tself.LineEdit2.setText(';'.join(results))\n\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\tdemo = Demo()\n\tdemo.show()\n\tsys.exit(app.exec_())\n","sub_path":"code folder/STH/youdao translator.py","file_name":"youdao translator.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"55087641","text":"from tkinter import *\n\nroot=Tk()\n\nroot.title(\"Contador\")\nroot.iconbitmap(\"peneico.ico\")\n\n\ndef sumar(num):\n\tnum+=1\n\tpantalla.set(num)\n\ndef restar(num):\n\tnum-=1\n\tpantalla.set(num)\n\ndef reset():\n\tpantalla.set(0)\n\nmiFrame=Frame(root)\nmiFrame.pack()\nmiFrame.config(width=\"390\", height=\"35\")\nmiFrame.grid_propagate(False)\n\npantalla=IntVar()\npantalla.set(0)\n\n\n\ntexto=Label(miFrame, text=\"Contador\")\ntexto.grid(row=0, column=0, sticky=\"e\", padx=5, pady=3)\n\nmostrar=Entry(miFrame, state=\"readonly\", textvariable=pantalla)\nmostrar.grid(row=0, column=1, padx=3, pady=3)\n\nboton=Button(miFrame, text=\"Cont Up\", width=6, cursor=\"hand2\", command=lambda:sumar(pantalla.get()))\nboton.grid(row=0, column=2, padx=3, pady=3)\n\nboton=Button(miFrame, text=\"Cont Down\", width=9, cursor=\"hand2\", command=lambda:restar(pantalla.get()))\nboton.grid(row=0, column=3, padx=3, pady=3)\n\nboton=Button(miFrame, text=\"Reset\", width=5, cursor=\"hand2\", command=lambda:reset())\nboton.grid(row=0, column=4, padx=3, pady=3)\n\n\n\n\n\n\n\n\n\nroot.mainloop()","sub_path":"Graficas/Ejercicios/gads.py","file_name":"gads.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"272122739","text":"\"\"\"Configuration dictionaries for jobs.\"\"\"\n# std\nfrom dataclasses import asdict, dataclass\nimport json\nimport os\nfrom typing import Any, Mapping, Optional, Type\n\n# Constants\nINFINITE = -1\nONE_DAY = 86400\nONE_MINUTE = 60\n\n\ndef _get_funsies_url(url: Optional[str] = None) -> str:\n \"\"\"Get the default funsies URL.\"\"\"\n if url is not None:\n return url\n else:\n try:\n default = os.environ[\"FUNSIES_URL\"]\n except KeyError:\n default = \"redis://localhost:6379\"\n return default\n\n\ndef _extract_hostname(url: str) -> str:\n \"\"\"Get the hostname part of the url.\"\"\"\n if \"@\" in url:\n hn = url.split(\"@\")[-1]\n else:\n hn = url.split(\"//\")[-1]\n return hn\n\n\n@dataclass\nclass Options:\n \"\"\"Runtime options for an Operation.\n\n This is a class that basically contains all the random options that may\n need to be set when building a workflow, such as timeouts, heterogeneous\n compute etc. It should generally be instantiated using the\n `funsies.options()` function.\n\n \"\"\"\n\n timeout: int = INFINITE\n \"\"\"Max execution time for this operation, in seconds or -1 for an operation\n that never timeouts. Defaults to -1.\"\"\"\n\n queue: str = \"default\"\n \"\"\"Defines which queue this operation should be executed by. For example,\n if a complex workflow requires GPUs for certain jobs, those jobs would be\n setup with option `queue=\"gpu\"` and workers on the nodes with available\n GPUs would be instantiated with `funsies worker gpu`. Then, only worker\n processes in the GPU queue would execute the GPU jobs.\"\"\"\n\n distributed: bool = True\n \"\"\"If False, jobs are executed by the local enqueuing process. Used to\n test workflows without having to start workers.\"\"\"\n\n reset: bool = False\n \"\"\"If `True`, this operation is `funsies.reset()` when generated.\"\"\"\n\n evaluate: bool = True\n \"\"\"If False, calling `funsies.execute()` on this job or its dependencies will fail.\n Can be used to ensure a specific branch is never executed.\"\"\"\n\n ttl: int = ONE_DAY\n \"\"\"Time to live (ttl) in queue for the operation. Defaults to 24h. Equivalent\n to the [rq keyword with the same name](https://python-rq.org/docs/). \"\"\"\n\n result_ttl: int = ONE_MINUTE\n \"\"\"Time to live (ttl) in queue for the rq result objects. Defaults to one\n minute. Equivalent to the [rq keyword with the same\n name](https://python-rq.org/docs/). (Note that this has nothing to do with\n the actual data results.) \"\"\"\n\n failure_ttl: int = ONE_DAY\n \"\"\"Time to live (ttl) in queue for the rq result objects of failing jobs.\n Defaults to one day. Equivalent to the [rq keyword with the same\n name](https://python-rq.org/docs/). (Note that this has nothing to do with\n the actual data results.) 
\"\"\"\n\n # TODO: make meaningfully adjustable\n serializer: str = \"rq.serializers.JSONSerializer\"\n\n @property\n def job_args(self: \"Options\") -> Mapping[str, Any]:\n \"\"\"Return a dictionary of arguments for rq.enqueue's job_args.\"\"\"\n return dict(\n timeout=self.timeout,\n ttl=self.ttl,\n result_ttl=self.result_ttl,\n failure_ttl=self.failure_ttl,\n )\n\n @property\n def task_args(self: \"Options\") -> Mapping[str, Any]:\n \"\"\"Return a dictionary of arguments for dag.task().\"\"\"\n return dict(\n evaluate=self.evaluate,\n )\n\n @property\n def queue_args(self: \"Options\") -> Mapping[str, Any]:\n \"\"\"Return a dictionary of arguments for rq.Queue.\"\"\"\n return dict(is_async=self.distributed, serializer=self.serializer)\n\n def pack(self: \"Options\") -> str:\n \"\"\"Pack an Options instance to a bytestring.\"\"\"\n return json.dumps(asdict(self))\n\n @classmethod\n def unpack(cls: Type[\"Options\"], data: str) -> \"Options\":\n \"\"\"Unpack an Options instance from a byte string.\"\"\"\n return Options(**json.loads(data))\n","sub_path":"src/funsies/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"420981993","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.style.use('fivethirtyeight')\n\ndata_dict = {\n 'Pratto': [0.227, 0.357, 0.381, 0.601, 0.365],\n 'Scocco': [0.548, 0.509, 0.407, 0.252, 0.230],\n 'Blandi': [0.416, 0.541, 0.894, 0.108, 0.444],\n 'Ábila': [0.633, 0.508, 0.564, 0.604, 0.563],\n 'Ruben': [0.170, 0.434, 0.436, 0.800, 0.052],\n 'Cvitanich': [0.542, 0.583, 0.000, 0.688, 0.339]\n}\n\nseasons = ['2013-14', '2014-15', '2015-16', '2016-17', '2017-18']\ncolors = ['#ff2700'] + ['#008fd5'] * 5\n\nind = np.arange(len(seasons))\nwidth = 0.9 / len(data_dict.keys())\n\nd = 0\nfig, ax = plt.subplots()\nn = 0\nfor season, data in data_dict.items():\n plot_data = list(reversed(data))\n ax.bar(ind + d, plot_data, width, color=colors[n], edgecolor='0.8')\n n += 1\n d += width\n\n# add some text for labels, title and axes ticks\n\nplt.ylim(0, 1.1)\n\nplt.title(\n 'Pratto nunca se destacó por sus goles',\n weight='bold',\n loc='left',\n fontsize=26,\n color='0.2'\n)\nax.text(\n -0.3,\n 1.05,\n 'Comparación de Goles por 90 minutos jugados de Pratto vs otros delanteros goleadores',\n color='0.3',\n fontsize=18,\n horizontalalignment='left'\n)\n\n\nplt.xticks(\n ind+0.4,\n seasons,\n color='0.3',\n fontsize=16,\n # rotation='vertical'\n )\n\nplt.show()\n","sub_path":"analyze_data/pratto_analysis/forwards_career_comparison.py","file_name":"forwards_career_comparison.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"5973958","text":"#!/usr/bin/python2\nfrom datetime import datetime\nimport configparser\nimport time\nimport syslog\nimport logging\n\nconfig_fname = 'defaults.cfg'\nconfig = configparser.ConfigParser()\nconfig.read(config_fname)\n\n\n\n#******************************************************************\ndef gconfig():\n \"\"\" The main program that links the gateway Moteino to the MQTT system\n \"\"\"\n #global reqList\n # Set up logging\n #syslog.syslog('gconfig Starting')\n #FORMAT = '%(asctime)-15s %(message)s'\n #logging.basicConfig(filename='/home/pi/gateway/gconfig.log',level=logging.DEBUG,format=FORMAT)\n #logging.info('gateway starting')\n\n #for key in config['DEFAULT']: print(key)\n\n for sec in config.sections():\n print(\"[\" + sec + \"]\")\n for key in config[sec]:\n print(key + \":\" + config[sec][key])\n \n print('Next')\n #print(config['general']['devices'])\n x = config['general']['devices']\n print(\"Length of x {}\".format(len(x)))\n #print(x[-1])\n nCnt = 0\n print('Next')\n for y in x:\n print(nCnt)\n print(y)\n nCnt += 1\n print('Next')\n\n#********************************************************************\nif __name__ == \"__main__\":\n gconfig()\n","sub_path":"gconfig.py","file_name":"gconfig.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"322986531","text":"from wsgiref.simple_server import make_server\n\nfrom tangled.web import Application, Resource\n\n\nclass Hello(Resource):\n\n def GET(self):\n if 'name' in self.urlvars:\n content = 'Hello, {name}'.format(**self.urlvars)\n else:\n content = 'Hello'\n return content\n\n\nif __name__ == '__main__':\n settings = {\n 'debug': True,\n }\n app = Application(settings)\n app.mount_resource('hello', Hello, '/')\n app.mount_resource('hello_name', Hello, '/')\n server = make_server('0.0.0.0', 6666, app)\n server.serve_forever()\n","sub_path":"examples/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"61101207","text":"\"\"\"\nMapper for any metric class and subclass.\n\"\"\"\n\nfrom typing import Any, Dict, Optional, Union\n\nfrom nlgmetricverse.metrics._core.base import MetricForTask\nfrom nlgmetricverse.metrics._core.utils import TaskNotAvailable\nfrom nlgmetricverse.utils.string import camel_to_snake\n\n\nclass TaskMapper:\n \"\"\"\n Base metric factory class which will be used as mapper for any metric class. This class is used by\n :py:class:`nlgmetricverse.AutoMetric` for loading specified metric. It maps the class to a specified metric class\n if multiple tasks are available for the metric.\n All metrics using TaskMapper must implement _TASKS attribute.\n Note:\n Use :py:class:`nlgmetricverse.metrics.TaskMapper` instead in case of metrics implementing a single task.\n \"\"\"\n\n _TASKS: Dict[str, MetricForTask] = None\n\n def __init__(self, *args, **kwargs):\n raise EnvironmentError(\"This class is designed to be instantiated by using 'by_task()' method.\")\n\n @classmethod\n def construct(\n cls, task: str, resulting_name: Optional[str] = None, compute_kwargs: Optional[Dict[str, Any]] = None, **kwargs\n ) -> MetricForTask:\n \"\"\"\n Common interface for all metrics for specified MetricForTask to be constructed.\n\n :param task: Task name for the desired metric to obtain the subclass.\n :param resulting_name: Resulting name of the computed score returned. If None,`~._get_path()` is used.\n :param compute_kwargs: Arguments to be passed to `compute()` method of metric at computation.\n :param kwargs: Additional arguments used for the metric computation.\n :raises Exception: :py:class:`TaskNotAvailable`.\n :return: Metric for proper task if available.\n \"\"\"\n subclass = cls._get_subclass(task=task)\n path = cls._get_path()\n resulting_name = resulting_name or path\n if subclass is None:\n raise TaskNotAvailable(path=path, task=task)\n return subclass._construct(resulting_name=resulting_name, compute_kwargs=compute_kwargs, **kwargs)\n\n @classmethod\n def _get_subclass(cls, task: str) -> Union[MetricForTask, None]:\n \"\"\"\n All metric modules must implement this method as it is used to call metrics by default. Should raise\n proper exception (``TaskNotAvailable``) if the task is not supported by the metric.\n\n :param task: Task name for the desired metric.\n :return: Metric for proper task if available, None otherwise.\n \"\"\"\n return cls._TASKS.get(task, None)\n\n @classmethod\n def _get_path(cls):\n return camel_to_snake(cls.__name__)\n\n\nclass MetricAlias(TaskMapper):\n \"\"\"\n Extension of TaskMapper which allows a single :py:class:`nlgmetricverse.metrics.MetricForTask` class to be aliased.\n If a metric has a single task, use this class instead of :py:class:`nlgmetricverse.metrics._core.TaskMapper`.\n All metrics using MetricAlias must implement _SUBCLASS attribute.\n \"\"\"\n\n _SUBCLASS: MetricForTask = None\n\n @classmethod\n def construct(\n cls,\n task: str = None,\n resulting_name: Optional[str] = None,\n compute_kwargs: Optional[Dict[str, Any]] = None,\n **kwargs\n ) -> MetricForTask:\n \"\"\"\n Common interface for all metrics for specified MetricForTask to be constructed. Do not raise\n :py:class:`TaskNotAvailable` unlike :py:class:`TaskMapper` as it directly uses _SUBCLASS defined.\n\n :param task: Ignored. Preserved to provide a common interface.\n :param resulting_name: Resulting name of the computed score returned. 
If None, `~._get_path()` is used.\n :param compute_kwargs: Arguments to be passed to `compute()` method of metric at computation.\n :param kwargs: Additional arguments used for the metric computation.\n :return: Metric for proper task if available.\n \"\"\"\n subclass = cls._get_subclass()\n resulting_name = resulting_name or cls._get_path()\n return subclass._construct(resulting_name=resulting_name, compute_kwargs=compute_kwargs, **kwargs)\n\n @classmethod\n def _get_subclass(cls, *args, **kwargs) -> MetricForTask:\n \"\"\"\n Get metric subclass.\n\n :param args: Arguments to pass to the subclass\n :param kwargs: Additional arguments used for the metric computation.\n :return: Subclass metric for proper task.\n \"\"\"\n return cls._SUBCLASS\n","sub_path":"nlgmetricverse/metrics/_core/auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
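A hedged sketch of wiring one single-task metric through `MetricAlias`; `_BleuForLanguageGeneration` is a stand-in name for some `MetricForTask` implementation, not part of this module:

class Bleu(MetricAlias):
    _SUBCLASS = _BleuForLanguageGeneration  # assumed task implementation

# construct() ignores `task` and resolves straight to _SUBCLASS; the score name
# defaults to camel_to_snake("Bleu") == "bleu" via _get_path().
metric = Bleu.construct(compute_kwargs={"max_order": 4})  # kwargs are illustrative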
+{"seq_id":"411145392","text":"from db.run_sql import run_sql\nfrom models.account import Account\nfrom models.merchant import Merchant\nfrom models.transaction import Transaction\nfrom models.tag import Tag\nimport repositories.merchant_repository as merchant_repository\nimport repositories.transaction_repository as transaction_repository\nimport repositories.tag_repository as tag_repository\n\ndef save_account(account):\n sql = \"INSERT INTO accounts (user_name, balance, transaction_summary) VALUES (%s, %s, %s) RETURNING id\"\n values = [account.user_name, account.balance, account.transaction_summary]\n results = run_sql(sql, values)\n id = results[0]['id']\n account.id = id\n\n# SELECT ALL\n\ndef select_all_account():\n accounts_selected = []\n sql = 'SELECT * FROM accounts'\n results = run_sql(sql)\n for result in results:\n account = Account(result['user_name'], result['balance'], result['transaction_summary'], result['id'])\n accounts_selected.append(account)\n return accounts_selected\n\n# SELECT\ndef select_one_account(id):\n sql = 'SELECT * FROM accounts WHERE id = %s'\n values = [id]\n results = run_sql(sql, values)[0]\n account = Account(results['user_name'],results['balance'], results['transaction_summary'], results['id'])\n return account\n\n# DELETE ALL\ndef delete_all_accounts():\n sql = 'DELETE FROM accounts'\n run_sql(sql)\n\n# DELETE\ndef delete_one_account(id):\n sql = 'DELETE FROM accounts WHERE id = %s'\n values = [id]\n run_sql(sql, values)\n\n# UPDATE\n\ndef update_account(account):\n sql = 'UPDATE accounts SET (user_name, balance, transaction_summary) = (%s, %s, %s) WHERE id = %s'\n values = [account.user_name, account.balance, account.transaction_summary, account.id]\n run_sql(sql, values)\n\n# def update_balance(amount):\n# amount = transaction_repository.add_transaction_total()","sub_path":"repositories/account_repository.py","file_name":"account_repository.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"357559473","text":"from sqlalchemy import Column, Integer, BigInteger, String, DateTime, Boolean\nfrom sqlalchemy.orm import declarative_base\n\nGambaModel = declarative_base()\n\n\nclass GambaUser(GambaModel):\n __tablename__ = \"gamba_user\"\n\n guild_id = Column(BigInteger, primary_key=True)\n user_id = Column(BigInteger, primary_key=True)\n balance = Column(Integer, nullable=False, default=0)\n last_claim = Column(DateTime, nullable=True)\n\n\nclass GambaGame(GambaModel):\n __tablename__ = \"gamba_game\"\n\n guild_id = Column(BigInteger, primary_key=True)\n channel_id = Column(BigInteger, nullable=False)\n title = Column(String, nullable=False)\n option_a = Column(String, nullable=False)\n option_b = Column(String, nullable=False)\n open_until = Column(DateTime, nullable=False)\n is_open = Column(Boolean, nullable=False, default=True)\n # Separate variable to track whether the game is open to account for bot message update delays\n message_id = Column(BigInteger, nullable=True)\n creator_user_id = Column(BigInteger, nullable=False)\n # Creator can't make bets to avoid when all mods make a bet and nobody can pay out\n\n\nclass GambaBet(GambaModel):\n __tablename__ = \"gamba_bet\"\n\n guild_id = Column(BigInteger, primary_key=True)\n user_id = Column(BigInteger, primary_key=True)\n option_a = Column(Integer, nullable=True)\n option_b = Column(Integer, nullable=True)\n","sub_path":"dingomata/cogs/gamba/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"636294055","text":"# randomized word embedding\n# pretrained word embedding\n\n\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\nclass Seq2seqCore:\n def __init__(self,\n gpu_device=1,\n encoder_vocab_size=10000,\n decoder_vocab_size=5000,\n embedding_size=512,\n pretrained_embedding_size=200,\n encoder_hidden_size=128):\n\n session_config = tf.ConfigProto(device_count={'GPU': 1})\n session_config.gpu_options.visible_device_list = str(gpu_device)\n session_config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=session_config)\n\n self.encoder_vocab_size = encoder_vocab_size\n self.decoder_vocab_size = decoder_vocab_size\n self.embedding_size = embedding_size\n self.pretrained_embedding_size = pretrained_embedding_size\n\n self.encoder_hidden_size = encoder_hidden_size\n self.decoder_hidden_size = self.encoder_hidden_size * 2\n self.encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')\n self.encoder_pretrained_inputs = tf.placeholder(shape=(None, None, self.pretrained_embedding_size),\n dtype=tf.float32,\n name='encoder_pretrained_inputs')\n self.encoder_inputs_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_inputs_length')\n batch_size, output_max_length = tf.unstack(tf.shape(self.encoder_inputs))\n start_tokens = tf.ones([batch_size], dtype=tf.int32)\n self.decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')\n decoder_inputs = tf.concat([tf.expand_dims(start_tokens, 1), self.decoder_targets], 1)\n self.decoder_inputs_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='decoder_inputs_length')\n\n encoder_embeddings = tf.Variable(tf.random_uniform([self.encoder_vocab_size, self.embedding_size], -1.0, 1.0),\n dtype=tf.float32)\n decoder_embeddings = tf.Variable(tf.random_uniform([self.decoder_vocab_size, self.embedding_size], -1.0, 1.0),\n dtype=tf.float32)\n\n encoder_inputs_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.encoder_inputs)\n decoder_inputs_embedded = tf.nn.embedding_lookup(decoder_embeddings, decoder_inputs)\n\n encoder_inputs_embedded_concat = tf.concat((encoder_inputs_embedded, self.encoder_pretrained_inputs), axis=2)\n\n fw_encoder_cell = tf.nn.rnn_cell.LSTMCell(self.encoder_hidden_size)\n bw_encoder_cell = tf.nn.rnn_cell.LSTMCell(self.encoder_hidden_size)\n\n ((encoder_fw_outputs,\n encoder_bw_outputs),\n (encoder_fw_final_state,\n encoder_bw_final_state)) = (tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_encoder_cell,\n cell_bw=bw_encoder_cell,\n inputs=encoder_inputs_embedded_concat,\n sequence_length=self.encoder_inputs_length,\n dtype=tf.float32))\n encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)\n\n train_helper = tf.contrib.seq2seq.TrainingHelper(decoder_inputs_embedded, self.decoder_inputs_length)\n pred_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(decoder_embeddings,\n start_tokens=start_tokens, end_token=0)\n\n def decode(helper, scope, reuse=None):\n with tf.variable_scope(scope, reuse=reuse):\n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(\n num_units=self.encoder_hidden_size, memory=encoder_outputs,\n memory_sequence_length=self.encoder_inputs_length)\n decoder_cell = tf.nn.rnn_cell.LSTMCell(self.decoder_hidden_size)\n attn_cell = tf.contrib.seq2seq.AttentionWrapper(\n decoder_cell, attention_mechanism, attention_layer_size=self.encoder_hidden_size)\n out_cell = tf.contrib.rnn.OutputProjectionWrapper(\n attn_cell, self.decoder_vocab_size, reuse=reuse\n 
)\n            decoder = tf.contrib.seq2seq.BasicDecoder(\n                cell=out_cell, helper=helper,\n                initial_state=out_cell.zero_state(\n                    dtype=tf.float32, batch_size=batch_size))\n            outputs = tf.contrib.seq2seq.dynamic_decode(\n                decoder=decoder, output_time_major=False,\n                impute_finished=True, maximum_iterations=5\n            )\n            return outputs[0]\n\n        train_outputs = decode(train_helper, 'decode')\n        self.pred_outputs = decode(pred_helper, 'decode', reuse=True)\n\n        weights = tf.to_float(tf.not_equal(decoder_inputs[:, 1:], 0))\n        self.loss = tf.contrib.seq2seq.sequence_loss(\n            train_outputs.rnn_output, self.decoder_targets, weights=weights)\n\n        self.train_op = tf.train.AdamOptimizer().minimize(self.loss)\n\n        self.sess.run(tf.global_variables_initializer())\n\n        # Saver\n        self.saver = tf.train.Saver(max_to_keep=0)\n\n        # Finalizer\n        self.sess.graph.finalize()\n\n    def save(self, ckpt_path):\n        self.saver.save(self.sess, ckpt_path)\n\n    def load(self, ckpt_path):\n        self.saver.restore(self.sess, ckpt_path)\n\n    def fit(self, input_feed_dict):\n        feed_dict = dict()\n        feed_dict[self.decoder_targets] = input_feed_dict[\"decoder_target\"]\n        feed_dict[self.encoder_inputs] = input_feed_dict[\"encoder_input\"]\n        feed_dict[self.encoder_pretrained_inputs] = input_feed_dict[\"encoder_pretrained\"]\n        feed_dict[self.encoder_inputs_length] = input_feed_dict[\"encoder_length\"]\n        feed_dict[self.decoder_inputs_length] = input_feed_dict[\"decoder_length\"]\n        _, l = self.sess.run([self.train_op, self.loss], feed_dict)\n        print(l)\n\n    def predict(self, input_feed_dict):\n        feed_dict = dict()\n        feed_dict[self.encoder_inputs_length] = input_feed_dict[\"encoder_length\"]\n        # feed_dict[self.decoder_inputs_length] = input_feed_dict[\"decoder_length\"]\n        feed_dict[self.encoder_pretrained_inputs] = input_feed_dict[\"encoder_pretrained\"]\n        feed_dict[self.encoder_inputs] = input_feed_dict[\"encoder_input\"]\n        prediction_output = self.sess.run(self.pred_outputs, feed_dict)\n        return prediction_output\n\n    def evaluate(self, input_feed_dict):\n        prediction_output = self.predict(input_feed_dict).tolist()\n        groundtruth = input_feed_dict[\"decoder_target\"].T.tolist()\n        assert len(prediction_output) == len(groundtruth)\n        total_size = len(prediction_output)\n        correct_count = 0\n        for idx in range(len(prediction_output)):\n            if prediction_output[idx] == groundtruth[idx]:\n                correct_count += 1\n        # sequence-level accuracy over the batch\n        return correct_count / total_size\n","sub_path":"seq2seq/utils/seq2seq/learning_core_20180331.py","file_name":"learning_core_20180331.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"584856334","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 12 18:06:57 2018\n\n@author: aappling\n\"\"\"\n\nfrom __future__ import print_function, division\nimport random\nimport sys\nimport datetime as dt\nimport numpy as np\nsys.path.append('2_model/src')\nimport tf_graph\nimport tf_train\n\ndef apply_pgnn(\n phase = 'pretrain',\n learning_rate = 0.005,\n state_size = 8, # Step 1: try number of input drivers, and half and twice that number\n ec_threshold = 24, # TODO: calculate for each lake: take GLM temperatures, calculate error between lake energy and the fluxes, take the maximum as the threshold. Can tune from there, but it usually doesn’t change much from the maximum\n dd_lambda = 0, # TODO: implement depth-density constraint in model\n ec_lambda = 0.025, # original mille lacs values were 0.0005, 0.00025\n l1_lambda = 0.05,\n data_file = '1_format/tmp/pgdl_inputs/nhd_1099476.npz',\n sequence_offset = 100,\n max_batch_obs = 50000,\n n_epochs = 2, # 200 is usually more than enough\n min_epochs_test = 0,\n min_epochs_save = 2, # later is recommended (runs quickest if ==n_epochs)\n track_epoch_data = False, # False is faster, True is more interesting\n restore_path = '', #'2_model/out/EC_mille/pretrain',\n save_path = '2_model/tmp/nhd_1099476/pretrain'\n # TODO: should have L1 and L2 norm weights in this list, implemented in tf_graph\n ):\n \"\"\"Train (or pretrain) a PGRNN, optionally save the weights+biases and/or predictions, and return the predictions\n\n Args:\n learning_rate: NN learning rate\n state_size: Number of units in each cell's hidden state\n ec_threshold: Energy imbalance beyond which NN will be penalized\n dd_lambda: PRESENTLY IGNORED. Depth-density penalty lambda, a hyperparameter that needs manual tuning. Multiplier to depth-density loss when adding to other losses.\n ec_lambda: Energy-conservation penalty lambda, another hyperparameter that needs manual tuning. Multiplier to energy-conservation loss when adding to other losses. Could set ec_lambda=0 if we wanted RNN only, no EC component\n l1_lambda: L1-regularization penalty lambda, another hyperparameter that needs manual tuning\n data_file: Filepath for the one file per lake that contains all the data.\n sequence_offset: Number of observations by which each data sequence in inputs['predict.features'] is offset from the previous sequence. Used to reconstruct a complete prediction sequence without duplicates.\n max_batch_obs: Upper limit on number of individual temperature predictions (date-depth-split combos) per batch. True batch size will be computed as the largest number of completes sequences for complete depth profiles that fit within this max_batch_size.\n n_epochs: Total number of epochs to run during training. Needs to be larger than the n epochs needed for the model to converge\n min_epochs_test: Minimum number of epochs to run through before computing test losses\n min_epochs_save: Minimum number of epochs to run through before considering saving a checkpoint (must be >= min_epochs_test)\n track_epoch_data: Should predictions and trainable variables be stored for each epoch? If no, predictions will still be stored after the final epoch\n restore_path: Path to restore a model from, or ''\n save_path: Path (directory) to save a model to. 
Will always be saved as ('checkpoint_%s' % epoch)\n    \"\"\"\n\n    # Track runtime\n    start_time = dt.datetime.now()\n\n    random.seed(9001)\n\n    # %% Load data\n    print('Loading data...')\n    inputs = np.load(data_file)\n    # Compute desirable number of sequences per batch based on max_batch_obs and\n    # the number of observations in each sequence (depths*dates)\n    obs_per_seq = inputs['train.labels'][:,:,0].size # get the num obs in the first sequence (same for every sequence)\n    seq_per_batch = np.int(np.floor(max_batch_obs / obs_per_seq))\n\n    # %% Build graph\n    print('Building graph...')\n    train_op, total_loss, rmse_loss, ec_loss, l1_loss, param, pred, x, y, m, unsup_inputs, unsup_phys_data = tf_graph.build_tf_graph(\n        inputs['train.labels'].shape[1], inputs['train.features'].shape[2], state_size,\n        inputs['unsup.physics'].shape[2], inputs['colnames.physics'], inputs['geometry'],\n        ec_threshold, dd_lambda, ec_lambda, l1_lambda, seq_per_batch, learning_rate)\n\n    # %% Train model\n    print('Training model...')\n    x_unsup = inputs['unsup.features']\n    p_unsup = inputs['unsup.physics']\n    x_pred = inputs['predict.features']\n    if phase == 'tune':\n        x_train = inputs['tune_train.features']\n        y_train = inputs['tune_train.labels']\n        m_train = inputs['tune_train.mask']\n        x_test = inputs['tune_test.features']\n        y_test = inputs['tune_test.labels']\n        m_test = inputs['tune_test.mask']\n    elif phase == 'pretrain':\n        x_train = inputs['pretrain.features']\n        y_train = inputs['pretrain.labels']\n        m_train = inputs['pretrain.mask']\n        x_test = inputs['pretrain.features'] # test on training data\n        y_test = inputs['pretrain.labels']\n        m_test = inputs['pretrain.mask']\n    elif phase == 'train':\n        x_train = inputs['train.features']\n        y_train = inputs['train.labels']\n        m_train = inputs['train.mask']\n        x_test = inputs['test.features']\n        y_test = inputs['test.labels']\n        m_test = inputs['test.mask']\n    else:\n        # fail fast here rather than hitting a NameError on x_train below\n        raise ValueError(\"unrecognized phase '%s'\" % phase)\n\n    train_stats, test_loss_rmse, params, preds = tf_train.train_tf_graph(\n        train_op, total_loss, rmse_loss, ec_loss, l1_loss, param, pred, x, y, m, unsup_inputs, unsup_phys_data,\n        x_train, y_train, m_train, x_unsup, p_unsup, x_test, y_test, m_test, x_pred,\n        sequence_offset=sequence_offset, seq_per_batch=seq_per_batch, n_epochs=n_epochs, min_epochs_test=min_epochs_test, min_epochs_save=min_epochs_save,\n        track_epoch_data=track_epoch_data, restore_path=restore_path, save_path=save_path)\n\n    # Track runtime, part 2\n    end_time = dt.datetime.now()\n    run_time = end_time - start_time\n\n    # Save the model diagnostics\n    stat_save_file = '%s/stats.npz' % save_path\n    np.savez_compressed(stat_save_file, train_stats=train_stats, test_loss_rmse=test_loss_rmse,\n                        start_time=start_time, end_time=end_time, run_time=run_time)\n    print(\"  Diagnostics saved to %s\" % stat_save_file)\n\n    return(train_stats, test_loss_rmse, params, preds)\n    # %% Inspect predictions\n\n    # prd has dimensions [depths*batches, n timesteps per batch, 1]\n    # xiaowei has a separate script that combines batches into a single time series. he uses the first occurrence of each prediction as the final value because it's better (has more preceding info to build on)\n\n    # visualize prd_EC, see Create_sparse_data.py\n    #prd = np.load('prd_EC_mille.npy')\n","sub_path":"2_model/src/apply_pgnn.py","file_name":"apply_pgnn.py","file_ext":"py","file_size_in_byte":6976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"309247049","text":"import pymel.core as pymel\nfrom classRigCtrl import RigCtrl\nfrom classRigPart import RigPart\nfrom classRigNode import RigNode\nfrom omtk.libs import libRigging, libAttr, libFormula, libPymel\nfrom classNameMap import NameMap\nimport functools\n\nclass CtrlIk(RigCtrl):\n kAttrName_State = 'ikFk'\n\n def build(self, *args, **kwargs):\n super(CtrlIk, self).build(*args, **kwargs)\n assert(self.node is not None)\n pymel.addAttr(self.node, longName=self.kAttrName_State)\n self.m_attState = getattr(self.node, self.kAttrName_State)\n return self.node\n\n def unbuild(self, *args, **kwargs):\n super(CtrlIk, self).unbuild(*args, **kwargs)\n self.m_attState = None\n\n\nclass CtrlIkSwivel(RigCtrl):\n def build(self, _oLineTarget=False, *args, **kwargs):\n super(CtrlIkSwivel, self).build(*args, **kwargs)\n assert(self.node is not None)\n oMake = self.node.getShape().create.inputs()[0]\n oMake.radius.set(oMake.radius.get() * 0.5)\n oMake.degree.set(1)\n oMake.sections.set(4)\n\n # Create line\n if _oLineTarget is True:\n oCtrlShape = self.node.getShape()\n oLineShape = pymel.createNode('annotationShape')\n oLineTransform = oLineShape.getParent()\n pymel.connectAttr(oCtrlShape.worldMatrix, oLineShape.dagObjectMatrix[0], force=True)\n oLineTransform.setParent(self.offset)\n pymel.pointConstraint(_oLineTarget, oLineTransform)\n\n return self.node\n\nclass SoftIkNode(RigNode):\n def build(self):\n self.node = pymel.createNode('network')\n formula = libFormula.Formula()\n fnAddAttr = functools.partial(libAttr.addAttr, self.node, hasMinValue=True, hasMaxValue=True)\n formula.inMatrixS = fnAddAttr(longName='inMatrixS', dt='matrix')\n formula.inMatrixE = fnAddAttr(longName='inMatrixE', dt='matrix')\n formula.inRatio = fnAddAttr(longName='inRatio', at='float')\n formula.inScale = fnAddAttr(longName='inScale', at='float')\n formula.inStretch = fnAddAttr(longName='inStretch', at='float')\n formula.inChainLength = fnAddAttr(longName='inChainLength', at='float', defaultValue=1.0)\n #fnAddAttr(longName='outTranslation', dt='float3')\n #fnAddAttr(longName='outStretch', dt='float')\n\n # inDistance is the distance between the start of the chain and the ikCtrl\n formula.inDistance = \"inMatrixS~inMatrixE\"\n # distanceSoft is the distance before distanceMax where the softIK kick in.\n # ex: For a chain of length 10.0 with a ratio of 0.1, the distanceSoft will be 1.0.\n formula.distanceSoft = \"inChainLength*inRatio\"\n # distanceSafe is the distance where there's no softIK.\n # ex: For a chain of length 10.0 with a ratio of 0.1, the distanceSafe will be 9.0.\n formula.distanceSafe = \"inChainLength-distanceSoft\"\n # This represent the soft-ik state\n # When the soft-ik kick in, the value is 0.0.\n # When the stretch kick in, the value is 1.0.\n # |-----------|-----------|----------|\n # -1 0.0 1.0 +++\n # -dBase dSafe dMax\n formula.deltaSafeSoft = \"(inDistance-distanceSafe)/distanceSoft\"\n # Hack: Prevent potential division by zero when soft-ik is desactivated\n formula.deltaSafeSoft = libRigging.CreateUtilityNode('condition',\n firstTerm=formula.distanceSoft,\n secondTerm=0.0,\n colorIfTrueR=0.0,\n colorIfFalseR=formula.deltaSafeSoft\n ).outColorR\n\n # outDistanceSoft is the desired ikEffector distance from the chain start after aplying the soft-ik\n # If there's no stretch, this will be directly applied to the ikEffector.\n # If there's stretch, this will be used to compute the amount of stretch needed to reach the ikCtrl while preserving the shape.\n formula.outDistanceSoft = 
\"(distanceSoft*(1-(e^(deltaSafeSoft*-1))))+distanceSafe\"\n\n # Affect ikEffector distance only where inDistance if bigger than distanceSafe.\n formula.outDistance = libRigging.CreateUtilityNode('condition',\n operation=2,\n firstTerm=formula.deltaSafeSoft,\n secondTerm=0.0,\n colorIfTrueR=formula.outDistanceSoft,\n colorIfFalseR=formula.inDistance\n ).outColorR\n # Affect ikEffector when we're not using stretching\n formula.outDistance = libRigging.CreateUtilityNode('blendTwoAttr',\n input=[formula.outDistance,formula.inDistance],\n attributesBlender=formula.inStretch).output\n\n\n #\n # Handle Stretching\n #\n\n # If we're using softIk AND stretchIk, we'll use the outRatioSoft to stretch the joints enough so that the ikEffector reach the ikCtrl.\n formula.outStretch = \"inDistance/outDistanceSoft\"\n\n # Apply the softIK only AFTER the distanceSafe\n formula.outStretch = libRigging.CreateUtilityNode('condition',\n operation=2,\n firstTerm=formula.inDistance,\n secondTerm=formula.distanceSafe,\n colorIfTrueR=formula.outStretch,\n colorIfFalseR=1.0\n ).outColorR\n\n # Apply stretching only if inStretch is ON\n formula.outStretch = libRigging.CreateUtilityNode('blendTwoAttr',\n input=[1.0, formula.outStretch],\n attributesBlender=formula.inStretch).output\n\n\n #\n # Connect outRatio and outStretch to our softIkNode\n #\n #fnAddAttr(longName='outTranslation', dt='float3')\n formula.outRatio = \"outDistance/inDistance\"\n attOutRatio = fnAddAttr(longName='outRatio', at='float')\n pymel.connectAttr(formula.outRatio, attOutRatio)\n\n attOutStretch = fnAddAttr(longName='outStretch', at='float')\n pymel.connectAttr(formula.outStretch, attOutStretch)\n\n# Todo: Support more complex IK limbs (ex: 2 knees)\nclass IK(RigPart):\n def __init__(self, *args, **kwargs):\n super(IK, self).__init__(*args, **kwargs)\n self.iCtrlIndex = 2\n self.ctrlIK = None\n self.ctrl_swivel = None\n\n def calc_swivel_pos(self):\n p3ChainS = self.input[0].getTranslation(space='world')\n p3ChainE = self.input[self.iCtrlIndex].getTranslation(space='world')\n fRatio = self.input[1].t.get().length() / self._chain_length\n p3SwivelBase = (p3ChainE - p3ChainS) * fRatio + p3ChainS\n p3SwivelDir = (self.input[1].getTranslation(space='world') - p3SwivelBase).normal()\n return p3SwivelBase + p3SwivelDir * self._chain_length\n\n def __debug(self, attr, scale=1.0, name=None):\n parent = pymel.createNode('transform')\n #if name: parent.rename(name + '_parent')\n loc = pymel.spaceLocator()\n if name: loc.rename(name)\n loc.setParent(parent)\n #if name: loc.rename(name)\n pymel.connectAttr(attr, loc.ty)\n parent.scale.set(scale, scale, scale)\n\n def build(self, _bOrientIkCtrl=True, *args, **kwargs):\n super(IK, self).build(*args, **kwargs)\n\n # Duplicate input chain (we don't want to move the hierarchy)\n # Todo: implement a duplicate method in omtk.libs.libPymel.PyNodeChain\n # Create ikChain and fkChain\n self._chain_ik = pymel.duplicate(self.input, renameChildren=True, parentOnly=True)\n for oInput, oIk, in zip(self.input, self._chain_ik):\n pNameMap = NameMap(oInput, _sType='rig')\n oIk.rename(pNameMap.Serialize('ik'))\n self._chain_ik[0].setParent(self._oParent) # Trick the IK system (temporary solution)\n\n oChainS = self._chain_ik[0]\n oChainE = self._chain_ik[self.iCtrlIndex]\n\n # Compute chain length\n self._chain_length = self._chain.length()\n\n # Compute swivel position\n p3SwivelPos = self.calc_swivel_pos()\n\n # Create ikChain\n grp_ikChain = pymel.createNode('transform', name=self._pNameMapRig.Serialize('ikChain'), 
parent=self.grp_rig)\n grp_ikChain.setMatrix(oChainS.getMatrix(worldSpace=True), worldSpace=True)\n oChainS.setParent(grp_ikChain)\n\n # Create ikEffector\n self._oIkHandle, oIkEffector = pymel.ikHandle(startJoint=oChainS, endEffector=oChainE, solver='ikRPsolver')\n self._oIkHandle.rename(self._pNameMapRig.Serialize('ikHandle'))\n self._oIkHandle.setParent(grp_ikChain)\n oIkEffector.rename(self._pNameMapRig.Serialize('ikEffector'))\n\n # Create ctrls\n if not isinstance(self.ctrlIK, CtrlIk): self.ctrlIK = CtrlIk()\n self.ctrlIK.build()\n #self.ctrlIK = CtrlIk(_create=True)\n self.ctrlIK.setParent(self.grp_anm)\n self.ctrlIK.rename(self._pNameMapAnm.Serialize('ik'))\n self.ctrlIK.offset.setTranslation(oChainE.getTranslation(space='world'), space='world')\n if _bOrientIkCtrl is True:\n self.ctrlIK.offset.setRotation(oChainE.getRotation(space='world'), space='world')\n\n if not isinstance(self.ctrl_swivel, CtrlIkSwivel): self.ctrl_swivel = CtrlIkSwivel()\n self.ctrl_swivel.build()\n #self.ctrl_swivel = CtrlIkSwivel(_oLineTarget=self.input[1], _create=True)\n self.ctrl_swivel.setParent(self.grp_anm)\n self.ctrl_swivel.rename(self._pNameMapAnm.Serialize('ikSwivel'))\n self.ctrl_swivel.offset.setTranslation(p3SwivelPos, space='world')\n self.ctrl_swivel.offset.setRotation(self.input[self.iCtrlIndex - 1].getRotation(space='world'), space='world')\n self.swivelDistance = self._chain_length # Used in ik/fk switch\n\n #\n # Create softIk node and connect user accessible attributes to it.\n #\n oAttHolder = self.ctrlIK\n fnAddAttr = functools.partial(libAttr.addAttr, hasMinValue=True, hasMaxValue=True)\n attInRatio = fnAddAttr(oAttHolder, longName='SoftIkRatio', niceName='SoftIK', defaultValue=0, minValue=0, maxValue=.5, k=True)\n attInStretch = fnAddAttr(oAttHolder, longName='Stretch', niceName='Stretch', defaultValue=0, minValue=0, maxValue=1.0, k=True)\n\n rig_softIkNetwork = SoftIkNode()\n rig_softIkNetwork.build()\n pymel.connectAttr(attInRatio, rig_softIkNetwork.inRatio)\n pymel.connectAttr(attInStretch, rig_softIkNetwork.inStretch)\n pymel.connectAttr(grp_ikChain.worldMatrix, rig_softIkNetwork.inMatrixS)\n pymel.connectAttr(self.ctrlIK.worldMatrix, rig_softIkNetwork.inMatrixE)\n rig_softIkNetwork.inChainLength.set(self._chain_length)\n\n # Constraint effector\n attOutRatio = rig_softIkNetwork.outRatio\n attOutRatioInv = libRigging.CreateUtilityNode('reverse', inputX=rig_softIkNetwork.outRatio).outputX\n pymel.select(clear=True)\n pymel.select(self.ctrlIK, grp_ikChain, self._oIkHandle)\n constraint = pymel.pointConstraint()\n constraint.rename(constraint.name().replace('pointConstraint', 'softIkConstraint'))\n pymel.select(constraint)\n weight_inn, weight_out = constraint.getWeightAliasList()\n pymel.connectAttr(attOutRatio, weight_inn)\n pymel.connectAttr(attOutRatioInv, weight_out)\n\n # Constraint joints stretch\n attOutStretch = rig_softIkNetwork.outStretch\n num_jnts = len(self._chain_ik)\n for i in range(1, num_jnts):\n obj = self._chain_ik[i]\n pymel.connectAttr(\n libRigging.CreateUtilityNode('multiplyDivide',\n input1X=attOutStretch,\n input1Y=attOutStretch,\n input1Z=attOutStretch,\n input2=obj.t.get()).output,\n obj.t, force=True)\n\n # Connect rig -> anm\n pymel.orientConstraint(self.ctrlIK, oChainE, maintainOffset=True)\n pymel.poleVectorConstraint(self.ctrl_swivel, self._oIkHandle)\n\n # Connect to parent\n if libPymel.is_valid_PyNode(self.parent):\n pymel.parentConstraint(self.parent, grp_ikChain, maintainOffset=True)\n for source, target in zip(self._chain_ik, self._chain):\n 
pymel.parentConstraint(source, target)\n\n def unbuild(self):\n pass\n","sub_path":"omtk/rigging/autorig/rigIK.py","file_name":"rigIK.py","file_ext":"py","file_size_in_byte":12117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"208526380","text":"\"\"\"empty message\n\nRevision ID: 10c85768e0ef\nRevises: 56d1409ee7ab\nCreate Date: 2019-12-02 13:54:54.967934\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '10c85768e0ef'\ndown_revision = '56d1409ee7ab'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('date_created', table_name='packages')\n op.drop_index('date_to_deliver', table_name='packages')\n op.drop_index('name', table_name='packages')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_index('name', 'packages', ['name'], unique=True)\n op.create_index('date_to_deliver', 'packages', ['date_to_deliver'], unique=True)\n op.create_index('date_created', 'packages', ['date_created'], unique=True)\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/10c85768e0ef_.py","file_name":"10c85768e0ef_.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"233612700","text":"#!/usr/bin/env python\n\nimport sys\n\nimport collustro\n\nheirarchy = {'flare': {\n 'analytics': {\n 'cluster': {\n 'AgglomerativeCluster': 3938,\n 'CommunityStructure': 3812,\n 'MergeEdge': 743,\n },\n 'graph': {\n 'BetweennessCentrality': 3534,\n 'LinkDistance': 5731,\n },\n },\n }}\n\npie = {'a': 20, 'b': 50, 'c': 30}\n\n\n#collustro.explore({'pie': pie, 'heirarchy': heirarchy}, debug=True)\ncollustro.explore(locals(), debug=True)\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"508820398","text":"# Copyright (c) 2020, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport itertools\nimport numpy as np\nimport pytest\n\nfrom coremltools.converters.mil.mil import Builder as mb\nfrom coremltools.converters.mil.testing_utils import (\n    assert_model_is_valid,\n    get_op_types_in_program,\n    apply_pass_and_basic_check,\n)\nfrom coremltools.converters.mil import testing_reqs\nfrom coremltools.converters.mil.mil.types import numpy_type_to_builtin_type\n\n\nnp.random.seed(1984)\n\nbackends = testing_reqs.backends\n\ndef _apply_weight_transform(inputs, is_deconv, dtype=np.float32):\n    \"\"\"\n    Utility function to test the weight transform function in conv batch_norm fusion pass.\n    \"\"\"\n    Cin, _, groups = 10, 20, 10\n    input_shape = (1, Cin, 2, 2)\n    @mb.program(input_specs=[mb.TensorSpec(shape=input_shape, dtype=numpy_type_to_builtin_type(dtype))])\n    def prog(x):\n\n        if is_deconv:\n            x = mb.conv_transpose(\n                x=x,\n                weight=inputs[\"conv_weight\"],\n                bias=inputs[\"conv_bias\"],\n                groups=groups,\n            )\n        else:\n            x = mb.conv(\n                x=x,\n                weight=inputs[\"conv_weight\"],\n                bias=inputs[\"conv_bias\"],\n                groups=groups,\n            )\n\n        x = mb.batch_norm(\n            x=x,\n            mean=inputs[\"mean\"],\n            variance=inputs[\"variance\"],\n            gamma=inputs[\"gamma\"],\n            beta=inputs[\"beta\"],\n            epsilon=inputs[\"epsilon\"],\n        )\n        return x\n\n    apply_pass_and_basic_check(\n        prog, \"common::fuse_conv_batchnorm\"\n    )\n\n    # get the updated weight from the prog\n    conv_op = []\n    for op in prog[\"main\"].operations:\n        if op.op_type == \"const\":\n            continue\n        conv_op.append(op)\n    assert len(conv_op) == 1, \"should only have one conv / conv_transpose layer.\"\n\n    return conv_op[0].weight.val, conv_op[0].bias.val\n\n\nclass TestConvBatchNormOptimizationPasses:\n\n    @pytest.mark.parametrize(\n        \"conv_type\",\n        [\"conv\", \"conv_transpose\"],\n    )\n    def test_weight_transform_conv_identity(self, conv_type):\n        \"\"\"\n        Test the weight transform function with an identity batchnorm layer.\n        \"\"\"\n        # parameters for conv\n        is_deconv = conv_type == \"conv_transpose\"\n        conv_weight = np.arange(20).astype(np.float32)\n        conv_weight = np.reshape(conv_weight, (10, 2, 1, 1)) if is_deconv else np.reshape(conv_weight, (20, 1, 1, 1))\n        conv_bias = np.arange(20).astype(np.float32)\n\n        # parameters for batch_norm\n        gamma = np.ones(20).astype(np.float32)\n        beta = np.zeros(20).astype(np.float32)\n        mean = np.zeros(20).astype(np.float32)\n        variance = np.ones(20).astype(np.float32)\n        epsilon = 0.\n\n        inputs = {\n            \"conv_weight\": conv_weight,\n            \"conv_bias\": conv_bias,\n            \"gamma\": gamma,\n            \"beta\": beta,\n            \"mean\": mean,\n            \"variance\": variance,\n            \"epsilon\": epsilon,\n        }\n\n        new_conv_weight, new_conv_bias = _apply_weight_transform(inputs, is_deconv)\n\n        np.testing.assert_equal(new_conv_weight, conv_weight)\n        np.testing.assert_equal(new_conv_bias, conv_bias)\n\n\n    @pytest.mark.parametrize(\n        \"conv_type, dtype\",\n        itertools.product(\n            [\"conv\", \"conv_transpose\"],\n            [np.float16, np.float32],\n        ),\n    )\n    def test_weight_transform_conv_type(self, conv_type, dtype):\n        \"\"\"\n        The weight transform function should return an updated conv weight with correct data type\n        \"\"\"\n        # parameters for conv\n        is_deconv = conv_type == \"conv_transpose\"\n        conv_weight = np.arange(20).astype(dtype)\n        conv_weight = np.reshape(conv_weight, (10, 2, 1, 1)) if is_deconv else np.reshape(conv_weight, (20, 1, 1, 1))\n        
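# (editor's sketch, not part of the original test) For reference, the pass under\n        # test is expected to fold the batch_norm into the conv via the standard fusion\n        # identity, assuming per-output-channel broadcasting:\n        #   scale = gamma / np.sqrt(variance + epsilon)\n        #   W_fused = W * scale;  b_fused = (b - mean) * scale + beta\n        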
conv_bias = np.arange(20).astype(dtype)\n\n # parameters for batch_norm\n gamma = np.ones(20).astype(dtype)\n beta = np.zeros(20).astype(dtype)\n mean = np.zeros(20).astype(dtype)\n variance = np.ones(20).astype(dtype)\n epsilon = dtype(0.1)\n\n inputs = {\n \"conv_weight\": conv_weight,\n \"conv_bias\": conv_bias,\n \"gamma\": gamma,\n \"beta\": beta,\n \"mean\": mean,\n \"variance\": variance,\n \"epsilon\": epsilon,\n }\n\n new_conv_weight, _ = _apply_weight_transform(inputs, is_deconv, dtype)\n\n assert new_conv_weight.dtype == dtype, \"the weight transform function should retain the weight's original dtype.\"\n\n\n @pytest.mark.parametrize(\n \"rank, groups, has_bias, backend\",\n itertools.product([3, 4], [1, 2, 10], [False, True], backends),\n )\n def test_conv(self, rank, groups, has_bias, backend):\n \"\"\"\n Input graph:\n input -----> conv -----> batch_norm ---> out\n\n Output graph:\n input -----> conv ----> out\n \"\"\"\n Cin, Cout = 10, 30\n input_shape = (2, Cin, 20) if rank == 3 else (2, Cin, 20, 24)\n\n @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])\n def prog(x):\n # conv layer\n conv_weight = np.random.rand(Cout, Cin // groups, 2) if rank == 3 else np.random.rand(Cout, Cin // groups, 2, 3)\n conv_bias = np.random.rand(Cout) if has_bias else None\n x = mb.conv(\n x=x,\n weight=conv_weight,\n bias=conv_bias,\n groups=groups,\n )\n\n # batch_norm layer\n gamma = np.random.rand(Cout)\n beta = np.random.rand(Cout)\n mean = np.random.rand(Cout)\n variance = np.random.rand(Cout)\n epsilon = 1e-2\n x = mb.batch_norm(\n x=x,\n mean=mean,\n variance=variance,\n gamma=gamma,\n beta=beta,\n epsilon=epsilon,\n )\n return x\n\n prev_prog, prev_block, block = apply_pass_and_basic_check(\n prog, \"common::fuse_conv_batchnorm\"\n )\n\n assert get_op_types_in_program(prev_prog) == [\"conv\", \"batch_norm\"]\n assert get_op_types_in_program(prog) == [\"conv\"]\n\n # validate graph pass\n output_shape = (2, Cout, 19) if rank == 3 else (2, Cout, 19, 22)\n assert_model_is_valid(\n prog,\n {\"x\": input_shape},\n expected_output_shapes={block.outputs[0].name: output_shape},\n backend=backend,\n )\n\n\n @pytest.mark.parametrize(\n \"rank, groups, has_bias, backend\",\n itertools.product([3, 4], [1, 2, 10], [False, True], backends),\n )\n def test_conv_transpose(self, rank, groups, has_bias, backend):\n \"\"\"\n Input graph:\n input -----> conv_transpose -----> batch_norm ---> out\n\n Output graph:\n input -----> conv_transpose ----> out\n \"\"\"\n Cin, Cout = 10, 30\n input_shape = (2, Cin, 20) if rank == 3 else (2, Cin, 20, 24)\n\n @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])\n def prog(x):\n # conv layer\n conv_weight = np.random.rand(Cin, Cout // groups, 2) if rank == 3 else np.random.rand(Cin, Cout // groups, 2, 3)\n conv_bias = np.random.rand(Cout) if has_bias else None\n x = mb.conv_transpose(\n x=x,\n weight=conv_weight,\n bias=conv_bias,\n groups=groups,\n )\n\n # batch_norm layer\n gamma = np.random.rand(Cout)\n beta = np.random.rand(Cout)\n mean = np.random.rand(Cout)\n variance = np.random.rand(Cout)\n\n epsilon = 1e-5\n x = mb.batch_norm(\n x=x,\n mean=mean,\n variance=variance,\n gamma=gamma,\n beta=beta,\n epsilon=epsilon,\n )\n return x\n\n prev_prog, prev_block, block = apply_pass_and_basic_check(\n prog, \"common::fuse_conv_batchnorm\"\n )\n\n assert get_op_types_in_program(prev_prog) == [\"conv_transpose\", \"batch_norm\"]\n assert get_op_types_in_program(prog) == [\"conv_transpose\"]\n\n # validate graph pass\n output_shape = (2, Cout, 21) if rank 
== 3 else (2, Cout, 21, 26)\n assert_model_is_valid(\n prog,\n {\"x\": input_shape},\n expected_output_shapes={block.outputs[0].name: output_shape},\n backend=backend,\n )\n","sub_path":"coremltools/converters/mil/mil/passes/test_conv_batchnorm_fusion_pass.py","file_name":"test_conv_batchnorm_fusion_pass.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"617399944","text":"########## VERSION 4.0 ####################\n# University of Tennessee at Chattanooga\n# RES Lab / UTChattSat Club\n# Stephen Lawrence (rdt249@mocs.utc.edu)\n'''\nThis script can be imported into another python script with the line:\n import memex as x\n\nThen you can use the functions to script your own tests, for example:\n x.status() # get the status of each DUT\n x.fill() # fill the DUTs with a pattern (default 0x55)\n x.hold() # hold the DUTs until user hits ENTER\n x.scan() # scan the DUTs for faults\nIf using the command line, use SHIFT+ENTER to separate your lines.\n\nYou can also change the settings within your test automation:\n x.size = [1,1,1,1] # limit all DUTs to 1 kB\n x.check() # find the address of every fault\nAll settings reset to the defaults below when you restart your python kernel.\n'''\n############ SETTINGS #####################\n\n# default group settings\nnominal = [3.3,3.3,3.3,3.3] # nominal voltage (by group)\nvoltage = [0.5,1.0,1.5,2.0] # holding voltage (by group)\npattern = [0x55,0x55,0x55,0x55] # fill and check pattern (by group)\nsize = [4096,4096,4096,4096] # size of memory (by group) in kB\n\n# default log settings\nlog_file = 'data/log.csv'\nlog_header = ['Command(str)',\n 'NominalA(V)','NominalB(V)','NominalC(V)','NominalD(V)',\n 'SizeA(kB)','SizeB(kB)','SizeC(kB)','SizeD(kB)',\n 'HoldTime(s)',\n 'HoldVoltsA(V)','HoldVoltsB(V)','HoldVolts(C)','HoldVolts(D)',\n 'PatternA(dec)','PatternB(dec)','PatternC(dec)','PatternD(dec)',\n 'StatusA1(bool)','StatusA2(bool)','StatusA3(bool)','StatusB1(bool)','StatusB2(bool)','StatusB3(bool)',\n 'StatusC1(bool)','StatusC2(bool)','StatusC3(bool)','StatusD1(bool)','StatusD2(bool)','StatusD3(bool)',\n 'TotalA(#)','TotalB(#)','TotalC(#)','TotalD(#)',\n 'FaultsA1(#)','FaultsA2(#)','FaultsA3(#)','FaultsB1(#)','FaultsB2(#)','FaultsB3(#)',\n 'FaultsC1(#)','FaultsC2(#)','FaultsC3(#)','FaultsD1(#)','FaultsD2(#)','FaultsD3(#)',\n 'Output(file)']\n\n################# FUNCTIONS ######################\n\nfrom datetime import datetime\nfrom os import path\nfrom csv import writer,QUOTE_MINIMAL\ndef log(data, header, file = \"data/log.csv\", timestamp = True): # create or update log given a data row along with headers\n if timestamp :\n now = datetime.now()\n date = now.strftime('%Y-%m-%d')\n time = now.strftime('%H:%M:%S')\n header = ['Date(y-m-d)','Time(H:M:S)'] + header\n data = [date,time] + data\n if path.isfile(file) is False: # check if file exists\n with open(file, 'w') as csvfile: # open file in write mode\n filewriter = writer(csvfile,quoting=QUOTE_MINIMAL) # make csv writer\n filewriter.writerow(header) # write column labels\n with open(file,'a') as csvfile: # open file in append mode\n filewriter = writer(csvfile,quoting=QUOTE_MINIMAL) # make csv writer\n filewriter.writerow(data) # write data\n return file\n\ndef bytes2bits(data) :\n result = []\n for i in range(len(data)) :\n result += [(data[i] >> b) & 1 for b in range(8)][::-1]\n return result\n\nfrom numpy import nan\ndef status(save_log=True) : # check status (1 for good, 0 for bad) for each DUT\n awake = [1,1,1,1,1,1,1,1,1,1,1,0]\n if save_log :\n #command nominal size thold tvolts pattern status totals faults file \n log_data = ['STATUS'] + nominal + size + [None] + [None]*4 + [None]*4 + awake + [None]*4 + [None]*12 +[None]\n log(log_data,log_header,log_file)\n return awake\n\ndef fill() : # fill all DUTs with pattern set by memex.pattern\n awake = status(save_log=False)\n #command nominal size thold tvolts pattern status totals faults 
file \n    log_data = ['FILL'] + nominal + size + [None] + [None]*4 + pattern + awake + [None]*4 + [None]*12 +[None]\n    log(log_data,log_header,log_file)\n    return pattern\n\nfrom time import sleep,time\ndef hold(t=None) : # hold all DUTs at voltage set by memex.voltage\n    awake = status(save_log=False)\n    print('\\tholding @',voltage,'...')\n    if t is None :\n        try :\n            start = time()\n            input('\\t(press ENTER to return to nominal)')\n            t = time() - start\n        except :\n            pass\n    else :\n        sleep(t)\n    #command            nominal   size   thold  tvolts    pattern   status  totals     faults      file \n    log_data = ['HOLD'] + nominal + size + [t] + voltage + [None]*4 + awake + [None]*4 + [None]*12 +[None]\n    log(log_data,log_header,log_file)\n    return t,voltage\n\nfrom random import random\nfrom numpy import nan\ndef scan() : # quickly scan all DUTs, report number of faults (comparing to memex.pattern)\n    awake = status(save_log=False)\n    faults = [nan] * 12\n    totals = [nan] * 4\n    scale = [50 * (3.3 - v) for v in voltage] # for simulation\n    for i in range(4) : # each group\n        for j in range(3) : # each member\n            if awake[3*i+j] :\n                faults[3*i+j] = int(random() * scale[i] + 3*scale[i]) * size[i] # for simulation\n        totals[i] = sum(faults[3*i : 3*i+3]) # sum over all three members of the group\n    #command            nominal   size   thold  tvolts    pattern   status  totals     faults      file \n    log_data = ['SCAN'] + nominal + size + [None] + [None]*4 + pattern + awake + totals + faults + [None]\n    log(log_data,log_header,log_file)\n    return faults\n\nfrom numpy import nan\nfrom pandas import DataFrame\ndef save(file='data/temp/save.csv') : # read the contents of all memories, save to output file\n    awake = status(save_log=False)\n    data = [nan] * 12\n    for i in range(4) : # each group\n        for j in range(3) : # each member of group\n            if awake[3*i+j] :\n                temp = []\n                for kb in range(size[i]) : # for each kB (refer to memex.size)\n                    temp += [pattern[i]] * 1024\n                data[3*i+j] = temp\n            else :\n                data[3*i+j] = [nan] * size[i] * 1024\n    #command            nominal   size   thold  tvolts    pattern   status  totals     faults      file \n    log_data = ['SAVE'] + nominal + size + [None] + [None]*4 + [None]*4 + awake + [None]*4 + [None]*12 + [file]\n    log(log_data,log_header,log_file)\n    DataFrame(data,index=['A1','A2','A3','B1','B2','B3','C1','C2','C3','D1','D2','D3']).to_csv(file)\n    return data\n\nfrom numpy import nan\ndef check(file='data/temp/check.csv') : # check the contents of all memories\n    awake = status(save_log=False)\n    faults = [nan] * 12\n    totals = [nan] * 4\n    data = [nan] * 12\n    scale = [100 * (3.3 - v) for v in voltage] # for simulation\n    for i in range(4) : # each group\n        for j in range(3) : # each member\n            if awake[3*i+j] :\n                temp = []\n                for kb in range(size[i]) : # for each kb in memex.size[i]\n                    temp += [int(pattern[i] ^ pattern[i])] * 1024\n                data[3*i+j] = temp\n                faults[3*i+j] = int(random() * scale[i] + 5*scale[i]) # for simulation\n            else :\n                data[3*i+j] = [nan] * size[i] * 1024\n        totals[i] = sum(faults[3*i : 3*i+3]) # sum over all three members of the group\n    #command            nominal   size   thold  tvolts    pattern   status  totals     faults      file \n    log_data = ['CHECK'] + nominal + size + [None] + [None]*4 + pattern + awake + totals + faults + [file]\n    log(log_data,log_header,log_file)\n    return data\n    ","sub_path":"memex.py","file_name":"memex.py","file_ext":"py","file_size_in_byte":7477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"347298187","text":"__author__ = 'liuhf'\n# coding:utf-8\nimport sys,os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom .BasePage import WebUI\nimport datetime\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.common.keys import Keys\n\nclass ChangePlatformPage(WebUI):\n    otherPlatform = (By.XPATH,\"//input[@id='departName']\")# destination campus input\n    selectDepartment = (By.XPATH,\"//treecontrol[@class='tree-classic ng-isolate-scope']/ul/li[1]/i[1]\")# select department\n    selectNextDepartment = (By.XPATH, \"//treecontrol[@class='tree-classic ng-isolate-scope']/ul/li[1]/treeitem/ul/li[1]/i[1]\") # select sub-department\n    selectSchool = (By.XPATH,\"//span[contains(text(),'北京和平西桥校区')]\")\n    saveSelection = (By.XPATH,\"//button[@ng-click='departmentSelected()']\")# save the campus selection\n    changePlatformAmount = (By.XPATH, \"//input[@id='changePlatformAmount']\") # transfer-out platform performance amount\n    changeDate = (By.XPATH,\"//input[@ng-model='platform.changePlatformDate']\")# platform-transfer date\n    saveChange = (By.XPATH, \"//button[@ng-click='savePlatformChange()']\") # submit\n    changeConfirm = (By.XPATH, \"//button[@class='confirm']\") # success prompt\n\n    # platform-transfer review\n    verifyChange = (By.XPATH, \"//a[contains(text(),'审核')]\") # review button\n    changeButton = (By.XPATH, \"//form[@name='addPlatForm']/div/div[3]/div/button[2]\") # confirm the review passes\n    verifyConfirm = (By.XPATH, \"//button[@class='confirm']\") # operation successful\n    def ChangePlatform(self,changePlatformName):\n        self.wait5\n        click = self.driver.find_element_by_xpath(\"//a[contains(text(),'前台业务')]\")\n        ActionChains(self.driver).click(click).perform()\n        self.wait5\n\n        self.driver.find_element_by_link_text(\"学员管理\").click()\n        self.wait5\n        self.driver.find_element_by_xpath(\"//*[@id='noMar']\").send_keys(changePlatformName)\n        self.wait\n        self.driver.find_element_by_xpath(\"//a[@ng-click='getStudentBySome()']\").click()\n        self.wait5\n        self.driver.find_element_by_xpath(\"//*[@id='nw+0']/span\").click()\n        self.wait\n        self.driver.find_element_by_xpath(\"//div[@class='ui-bubble']/div/ul/li[11]\").click()\n        self.wait5\n\n        self.findElement(*self.otherPlatform).click()\n        self.wait1\n        self.findElement(*self.selectDepartment).click()\n        self.wait1\n        self.findElement(*self.selectNextDepartment).click()\n        self.wait1\n        click = self.findElement(*self.selectSchool)\n        ActionChains(self.driver).click(click).perform()\n        self.wait1\n        self.findElement(*self.saveSelection).click()\n        self.wait\n\n        self.findElement(*self.changePlatformAmount).send_keys('2000')\n        self.wait\n\n        now = datetime.datetime.now()\n        tTimeNow = now.strftime('%Y-%m-%d')\n        tTime = \"$(\\\"input[ng-model='platform.changePlatformDate']\\\").removeAttr('readonly');$(\\\"input[ng-model='platform.changePlatformDate']\\\").attr('value',\\\"\" + tTimeNow + \"\\\").trigger('change')\"\n        self.driver.execute_script(tTime)\n\n        click = self.findElement(*self.saveChange)\n        ActionChains(self.driver).click(click).perform()\n        self.wait1\n        valitext = self.driver.find_element_by_xpath(\"//h2[contains(text(),'转平台操作已成功')]\").text\n        context_expected = \"转平台操作已成功\"\n        self.assertEqual(context_expected, valitext)\n        self.findElement(*self.changeConfirm).click()\n\n    def ChangePlatformVerify(self,changePlatformName):\n        self.wait5\n        click = self.driver.find_element_by_xpath(\"//a[contains(text(),'首页')]\")\n        ActionChains(self.driver).click(click).perform()\n        self.wait1\n        click = self.driver.find_element_by_xpath(\"//a[@data-target='#OrderManagement']\")\n        ActionChains(self.driver).click(click).perform()\n        self.wait1\n        
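# (added comment) the steps below re-click the order-management menu, then send\n        # DOWN key presses to scroll the submenu into view before selecting its fifth entry\n        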
ActionChains(self.driver).click(click).perform()\n\n        ActionChains(self.driver).key_down(Keys.DOWN).perform()\n        ActionChains(self.driver).key_up(Keys.DOWN).perform()\n        self.wait1\n        ActionChains(self.driver).key_down(Keys.DOWN).perform()\n        ActionChains(self.driver).key_up(Keys.DOWN).perform()\n        self.wait5\n        click = self.driver.find_element_by_xpath(\"//ul[@id='OrderManagement']/li[5]\")\n        ActionChains(self.driver).click(click).perform()\n        self.wait5\n\n        count = 1\n        pos = 0\n        n = 1\n        count = len(self.driver.find_elements_by_xpath(\"//tr[@ng-repeat='row in PlatformListFrom']\"))\n\n        while (n <= count):\n            tableLines = \"//table[@st-pipe='getCrmPlatformList']/tbody/tr[\" + str(n) + \"]/td[1]\"\n            name = self.driver.find_element_by_xpath(tableLines).text\n            tableLines = \"//table[@st-pipe='getCrmPlatformList']/tbody/tr[\" + str(n) + \"]/td[3]\"\n            status = self.driver.find_element_by_xpath(tableLines).text\n\n            if ((changePlatformName == name) and (status == \"待转出审核\")):\n                pos = n\n                break\n            n = n + 1\n\n        operation = \"//a[@id='nw+\" + str(n - 1) + \"']\"\n        click = self.driver.find_element_by_xpath(operation)\n        ActionChains(self.driver).click(click).perform()\n        self.wait\n\n        click = self.findElement(*self.verifyChange)\n        ActionChains(self.driver).click(click).perform()\n        self.wait5\n        click = self.findElement(*self.changeButton)\n        ActionChains(self.driver).click(click).perform()\n        self.wait\n\n        valitext = self.driver.find_element_by_xpath(\"//h2[contains(text(),'操作成功')]\").text\n        context_expected = \"操作成功\"\n        self.assertEqual(context_expected, valitext)\n        click = self.findElement(*self.verifyConfirm)\n        ActionChains(self.driver).click(click).perform()","sub_path":"Page/changePlatformPage.py","file_name":"changePlatformPage.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"187001753","text":"r'''\nCopyright 2014 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nimport logging\nfrom nogotofail.mitm.connection.handlers.data import handlers\nfrom nogotofail.mitm.connection.handlers.data import DataHandler\nfrom nogotofail.mitm.connection.handlers.store import handler\nfrom nogotofail.mitm.event import connection\nfrom nogotofail.mitm.util import ssl2, tls, vuln\n\n@handler.passive(handlers)\nclass InsecureCipherDetectionHandler(DataHandler):\n name = \"insecurecipherdetection\"\n description = \"Detect insecure ciphers in TLS Client Hellos\"\n\n def _handle_bad_ciphers(self, ciphers, message):\n self.log(logging.ERROR, message)\n self.log_attack_event(data=ciphers)\n self.connection.vuln_notify(vuln.VULN_WEAK_CIPHER)\n\n def on_ssl(self, client_hello):\n\n # Check for anon ciphers, these don't verify the identity of the\n # endpoint\n anon_ciphers = [str(c) for c in client_hello.ciphers if \"_anon_\" in str(c)]\n if anon_ciphers:\n self._handle_bad_ciphers(anon_ciphers,\n \"Client enabled anonymous TLS/SSL cipher suites %s\" %\n (\", \".join(anon_ciphers)))\n\n # Check for NULL encryption ciphers\n null_ciphers = [str(c) for c in client_hello.ciphers if \"_WITH_NULL_\" in str(c)]\n if null_ciphers:\n self._handle_bad_ciphers(null_ciphers,\n \"Client enabled NULL encryption TLS/SSL cipher suites %s\" %\n (\", \".join(null_ciphers)))\n\n # Check for NULL integrity ciphers\n integ_ciphers = [str(c) for c in client_hello.ciphers if str(c).endswith(\"_NULL\")]\n if integ_ciphers:\n self._handle_bad_ciphers(integ_ciphers,\n \"Client enabled NULL integrity TLS/SSL cipher suites %s\" %\n (\", \".join(integ_ciphers)))\n\n\n@handler.passive(handlers)\nclass WeakTLSVersionDetectionHandler(DataHandler):\n name = \"weaktlsversiondetection\"\n description = \"Detect versions of the TLS/SSL protocols that are known to be weak\"\n\n def on_ssl(self, client_hello):\n if isinstance(client_hello, ssl2.types.ClientHello):\n self.log(logging.ERROR, \"Client enabled SSLv2 protocol\")\n self.log_attack_event(data=\"SSLv2\")\n self.connection.vuln_notify(vuln.VULN_WEAK_TLS_VERSION)\n return\n if (isinstance(client_hello, tls.types.ClientHello) and\n client_hello.version.major == 3 and\n client_hello.version.minor == 0):\n # SSLv3 is still used in fallback situations and ngtf tends to cause\n # these fallback situations so we wont notify the client of these\n # vulns to prevent spamming. 
We will log if TLS_FALLBACK_SCSV is set\n # since it should be set in fallback situations.\n fallback = (\"TLS_FALLBACK_SCSV\" in\n [str(c) for c in client_hello.ciphers])\n if fallback:\n self.log(logging.WARNING,\n \"Client enabled SSLv3 protocol with TLS_FALLBACK_SCSV\")\n else:\n self.log(logging.ERROR,\n \"Client enabled SSLv3 protocol without TLS_FALLBACK_SCSV\")\n self.log_attack_event(data=\"SSLv3\")\n","sub_path":"nogotofail-dev/nogotofail/mitm/connection/handlers/data/ssl.py","file_name":"ssl.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"285405364","text":"import callFuntionFutures\r\nimport time\r\nimport datetime\r\nimport pandas as pd\r\n\r\n# CSV version\r\n\r\nwhile True:\r\n    #try:\r\n    timeBegin = time.time()\r\n    print(datetime.datetime.now().strftime('%H:%M'))\r\n    df1 = pd.read_csv('Data.csv')\r\n    df = df1.set_index('indexAround')\r\n\r\n    Around = df.loc['Around']['Balance'] # counter\r\n\r\n    df = callFuntionFutures.updatee(df,Around)\r\n    df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # drop the unwanted columns\r\n\r\n    pd.set_option('display.max_columns', None)\r\n    print(df.loc[Around].to_frame().T)\r\n    df = df.reset_index()\r\n    df.to_csv(\"Data.csv\", index=False)\r\n\r\n    timeEnd = time.time()\r\n    timeElapsed = timeEnd - timeBegin\r\n    time.sleep(max(0, 60 - timeElapsed)) # wait out the rest of the 1-minute cycle; never pass a negative value to sleep\r\n\r\n    #except Exception as e:\r\n    #    callFuntion.LineNotify('','',e,'error') # on error, send a Line notification to the author\r\n    #    break\r\n","sub_path":"Futures/reBalanceCSVft.py","file_name":"reBalanceCSVft.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"499715794","text":"# -*- coding: utf-8 -*-\n\"\"\"\nN-Dimensional Machine Learning Genetic Algorithm\nJoel Weightman\n\nA particle is accelerated at time t_0 at a value of acc_0, at t_1, the acceleration \nchanges to acc_1. Cumulative sum is used for integration. No smoothing.\n\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport scipy.integrate as integ\nfrom multiprocessing import Pool\n\ndef calculate_result(pop, acc, t_steps, dt, best_index, final, dimensions):\n \n if dimensions == 1:\n if final == False:\n \n v, d = velocity_distance_1d(pop,dt,acc,t_steps)\n \n pop['results'] = np.concatenate((d,v), axis = 1)\n \n return pop\n \n else:\n \n v1 = np.zeros((np.shape(pop['actions'])[0],np.shape(pop['actions'])[1],1))\n \n v1[:,1:t_steps,0] = np.cumsum(pop['actions'][:,:-1]*dt*acc, axis = 1)[:,:,0]\n d1 = np.cumsum(v1*dt, axis = 1)\n \n# plt.figure()\n# plt.scatter(np.arange(np.size(pop['actions'][0,:])),d1[best_index,:,0], s = 1, c='red')\n# plt.figure()\n# plt.scatter(np.arange(np.size(pop['actions'][0,:])),v1[best_index,:,0], s = 1,c='black')\n \n return\n \n elif dimensions == 2:\n if final == False:\n \n v, d_x, d_y = velocity_distance_2d(pop,dt,acc,t_steps)\n \n pop['results'] = np.concatenate((d_x, d_y, v), axis = 1)\n \n return pop\n \n else:\n \n v_x = np.zeros((np.shape(pop['actions'])[0],np.shape(pop['actions'])[1],1))\n v_y = np.zeros((np.shape(pop['actions'])[0],np.shape(pop['actions'])[1],1))\n \n v_x[:,1:t_steps,0] = np.cumsum(np.multiply(pop['actions'][:,:-1,0],acc*dt*np.cos(pop['actions'][:,:-1,1])), axis = 1)\n v_y[:,1:t_steps,0] = np.cumsum(np.multiply(pop['actions'][:,:-1,0],acc*dt*np.sin(pop['actions'][:,:-1,1])), axis = 1)\n \n d_x = integ.cumtrapz(v_x, dx = dt, axis = 1, initial = 0)\n d_y = integ.cumtrapz(v_y, dx = dt, axis = 1, initial = 0)\n \n \n# plt.figure(1)\n# plt.clf()\n# plt.scatter(np.arange(np.size(pop['actions'][0,:,0])),d_x[best_index,:,0], s = 1, c='red')\n# plt.scatter(np.arange(np.size(pop['actions'][0,:,0])),d_y[best_index,:,0], s = 1, c='blue')\n# plt.xlabel('Time')\n# plt.ylabel('Distance')\n# plt.pause(0.001)\n# plt.draw()\n# plt.figure(2)\n# plt.clf()\n# plt.scatter(np.arange(np.size(pop['actions'][0,:,0])),np.sqrt(v_x[best_index,:,0]**2 + v_y[best_index,:,0]**2), s = 1,c='black')\n# plt.xlabel('Time')\n# plt.ylabel('Velocity')\n# plt.pause(0.001)\n# plt.draw()\n# plt.figure(3)\n# plt.clf()\n# plt.scatter(d_x[best_index,:,0],d_y[best_index,:,0], s = 1,c='black')\n# plt.xlabel('Distance_X')\n# plt.ylabel('Distance_Y')\n# plt.pause(0.001)\n# plt.draw()\n# plt.figure(4)\n# plt.clf()\n# plt.scatter(np.arange(np.size(pop['actions'][0,:,0])),pop['actions'][best_index,:,1]*180/np.pi, s = 1,c='black')\n# plt.xlabel('Time')\n# plt.ylabel('Angle')\n# plt.pause(0.001)\n# plt.draw()\n \n plt.figure(1)\n plt.clf()\n plt.subplot(3,1,1)\n plt.scatter(np.arange(np.size(pop['actions'][0,:,0])),np.sqrt(v_x[best_index,:,0]**2 + v_y[best_index,:,0]**2), s = 1,c='black')\n plt.xlabel('Time')\n plt.ylabel('Velocity')\n plt.subplot(3,1,2)\n plt.scatter(d_x[best_index,:,0],d_y[best_index,:,0], s = 1,c='black')\n plt.xlabel('Distance_X')\n plt.ylabel('Distance_Y')\n plt.subplot(3,1,3)\n plt.scatter(np.arange(np.size(pop['actions'][0,:,0])),pop['actions'][best_index,:,1]*180/np.pi, s = 1,c='black')\n plt.xlabel('Time')\n plt.ylabel('Angle')\n plt.pause(0.001)\n plt.draw()\n \n return\n\n\ndef fitness(pop, weights, goals, thresh, v_max, dimensions):\n\n pop['actions'][:,-1,:] = 0.0\n loc_val = 
np.zeros((np.shape(pop['results'])[0]))\n goal_loc_val = np.zeros((np.shape(pop['results'])[0]))\n for dim in range(dimensions):\n \n loc_val += (pop['results'][:,dim] - goals[dim])**2\n# print(loc_val[0])\n goal_loc_val += goals[dim]**2\n# print(goals[dim])\n \n \n loc_val = np.sqrt(loc_val) / np.sqrt(goal_loc_val)\n vel_val = np.abs(pop['results'][:,dimensions] - goals[dimensions]) / (v_max)\n \n goal_val = np.concatenate((loc_val[:,np.newaxis],vel_val[:,np.newaxis]), axis = 1)\n pop['score'] = (weights[0] - weights[0]*goal_val[:,0]) + (weights[1] - weights[1]*goal_val[:,1])\n \n fuel_sum = np.cumsum(np.abs(pop['actions'][:,::-1,0]), axis = 1)[:,::-1]\n nonzero_length = np.argmin(fuel_sum, axis = 1)\n \n temp_fuel_score = (weights[2] - weights[2] * (nonzero_length) / (np.size(pop['actions'],1)))# + (weights[3] - weights[3] * fuel_sum[:,0] / np.size(pop['actions'],1))\n temp_fuel_score[temp_fuel_score > 0.2*weights[2]] = 0.2*weights[2]\n pop['score'] += temp_fuel_score\n\n# print(nonzero_length.min())\n## print((np.size(pop['actions'],1)))\n## print(loc_val.min(), vel_val.min())\n# print((weights[0] - weights[0]*goal_val[0,0]), (weights[1] - weights[1]*goal_val[0,1]), (weights[2] - weights[2] * (nonzero_length[0]) / (np.size(pop['actions'],1))))\n# print((goal_val[0,0]), (goal_val[0,1]), (nonzero_length[0]) / (np.size(pop['actions'],1)))\n# print(weights[0], weights[1], weights[2]*0.2)\n## print((weights[0] - weights[0]*goal_val[:,0]).argmax(), (weights[1] - weights[1]*goal_val[:,1]).argmax(), (weights[2] - weights[2] * (nonzero_length) / (np.size(pop['actions'],1))).argmax())\n## print((weights[2] - weights[2] * (nonzero_length) / (np.size(pop['actions'],1))).max(), (weights[3] - weights[3] * fuel_sum[:,0] / np.size(pop['actions'],1)).max())\n# print(np.sum(weights))\n pop['score'] *= 1/np.sum(weights)\n# print(pop['score'].max())\n \n return pop\n\ndef init_population(pop, dimensions, deg_steps):\n \n if dimensions == 1:\n pop['actions'][:,:,0] = np.random.randint(-1, 2, size = np.shape(pop['actions'][:,:,0]))\n elif dimensions == 2:\n pop['actions'][:,:,0] = np.random.randint(0, 2, size = np.shape(pop['actions'][:,:,0]))\n# pop['actions'][:,:,1] = np.random.uniform(0, 2*np.pi, size = np.shape(pop['actions'][:,:,1]))\n pop['actions'][:,:,1] = np.random.randint(0, 360/deg_steps, size = np.shape(pop['actions'][:,:,1]))*deg_steps*(np.pi/180)\n# print(pop['actions'][:,:,1])\n return pop\n\ndef velocity_distance_1d(pop,dt,acc,t_steps):\n \n v = np.zeros((np.shape(pop['actions'])[0],np.shape(pop['actions'])[1],1))\n\n v[:,1:t_steps,0] = np.cumsum(np.multiply(pop['actions'][:,:-1],acc*dt), axis = 1)[:,:,0]\n d = np.trapz(v, dx = dt, axis = 1)\n \n return v[:,-1], d\n \n\ndef velocity_distance_2d(pop,dt,acc,t_steps):\n \n v_x = np.zeros((np.shape(pop['actions'])[0],np.shape(pop['actions'])[1],1))\n v_y = np.zeros((np.shape(pop['actions'])[0],np.shape(pop['actions'])[1],1))\n\n v_x[:,1:t_steps,0] = np.cumsum(np.multiply(pop['actions'][:,:-1,0],acc*dt*np.cos(pop['actions'][:,:-1,1])), axis = 1)\n v_y[:,1:t_steps,0] = np.cumsum(np.multiply(pop['actions'][:,:-1,0],acc*dt*np.sin(pop['actions'][:,:-1,1])), axis = 1)\n\n d_x = np.trapz(v_x, dx = dt, axis = 1)\n d_y = np.trapz(v_y, dx = dt, axis = 1)\n \n return np.sqrt(v_x[:,-1]**2 + v_y[:,-1]**2), d_x, d_y\n\ndef pop_selection(pop, selection_num, dimensions, angle_mutation, deg_steps):\n \n sorted_pop = np.argsort(pop['score'])[::-1]\n \n elite_pop = sorted_pop[:selection_num[0]]\n selected_pop = sorted_pop[:selection_num[1]]\n lucky_pop = 
np.random.choice(sorted_pop[selection_num[1]:],size = selection_num[2], replace = False)\n mutated_pop = np.random.choice(selected_pop, size = selection_num[3], replace = False)\n# print(selection_num[3])\n selected_pop = np.setdiff1d(selected_pop,mutated_pop)\n \n actions = pop['actions'][np.concatenate((elite_pop,lucky_pop))]\n \n pop = generate_children(pop, actions, selection_num, selected_pop, mutated_pop, dimensions, angle_mutation, deg_steps)\n \n return pop, [np.array(pop['score'][sorted_pop[0]]),pop['results'][sorted_pop[0]],sorted_pop[0]]\n\ndef generate_children(pop, actions, selection_num, selected_pop, mutated_pop, dimensions, angle_mutation, deg_steps):\n \n mutated_actions = pop['actions'][mutated_pop,:,:]\n mut_num = np.random.randint(1,selection_num[4]+1,size = 1)[0]\n indices = np.random.randint(0,np.size(mutated_actions[0,:,0]), size = (np.size(mutated_actions[:,0,0]),mut_num))\n for i, mut_locs in enumerate(indices):\n if dimensions == 1:\n mutated_actions[i,mut_locs,0] = np.random.randint(-1, 2, size = mut_num)\n elif dimensions == 2:\n mutated_actions[i,mut_locs,0] = np.random.randint(0, 2, size = mut_num)\n if dimensions == 2:\n mut_num = np.random.randint(1,selection_num[4]+1,size = 1)[0]\n indices = np.random.randint(0,np.size(mutated_actions[0,:,0]), size = (np.size(mutated_actions[:,0,0]),mut_num))\n for i, mut_locs in enumerate(indices):\n# mutated_actions[i,mut_locs,1] = np.random.uniform(0, 2*np.pi, size = mut_num)\n mutated_actions[i,mut_locs,1] = np.random.randint(0, 360/deg_steps, size = mut_num)*deg_steps*(np.pi/180)\n# mutated_actions[i,mut_locs,1] = mutated_actions[i,mut_locs,1]*np.random.uniform(1-angle_mutation, 1+angle_mutation, size = mut_num)\n# mutated_actions[i,mut_locs,1] = np.floor((mutated_actions[i,mut_locs,1]*np.random.uniform(1-angle_mutation, 1+angle_mutation, size = mut_num))*180/np.pi)*np.pi/180\n \n random_selection_children_0 = np.random.randint(0,2,size = (int(len(selected_pop)/2),np.size(pop['actions'][0,:,0]),dimensions))\n if dimensions == 2:\n random_selection_children_0[:,:,1] = random_selection_children_0[:,:,0]\n random_selection_children_1 = 1 - random_selection_children_0\n# print(np.shape(random_selection_children_0))\n \n selected_pop_0 = np.random.choice(selected_pop, size = int(len(selected_pop)/2), replace = False)\n selected_pop_1 = np.setdiff1d(selected_pop, selected_pop_0)\n \n children_actions_0 = pop['actions'][selected_pop_0,:,:]*random_selection_children_0 + pop['actions'][selected_pop_1,:,:]*random_selection_children_1\n children_actions_1 = pop['actions'][selected_pop_1,:,:]*random_selection_children_0 + pop['actions'][selected_pop_0,:,:]*random_selection_children_1\n \n pop['actions'] = np.concatenate((actions,mutated_actions,children_actions_0,children_actions_1), axis = 0)\n\n return pop\n\ndef run(A_l, A_v, A_f, A_t, perc_elite, perc_lucky, perc_mutation, mutation_chance, pop_size, generations, t_steps, loc_des_x, loc_des_y, vel_des, ideal_time_perc, dt, dimensions, samples, angle_mutation):\n \n# plt.close(\"all\")\n \n # Threshold Values\n thresh_loc = 0.0\n thresh_vel = 0.0\n thresh_time = 0.0\n thresh_perf = 1e-5\n \n deg_steps = 5\n \n thresh = np.array([thresh_loc, thresh_vel, thresh_time])\n A_f = 0.0\n weights = np.array([A_l, A_v, A_t, A_f])\n if dimensions == 1:\n goals = np.array([loc_des_x, vel_des, ideal_time_perc])\n elif dimensions == 2:\n goals = np.array([loc_des_x, loc_des_y, vel_des, ideal_time_perc])\n \n ## Conditions to set\n gen_count = np.zeros((2,samples))\n \n for KK in 
range(samples):\n start_time = time.time()\n \n pop = dict({'actions': np.zeros((pop_size,t_steps,dimensions)), 'results':np.zeros((pop_size,dimensions + 1)), 'score':np.zeros((pop_size))})\n \n ## Calculated Parameters\n pop_num_elite = int(perc_elite*pop_size)\n if pop_num_elite < 2:\n pop_num_elite = 2\n pop_num_mutation = int(perc_mutation*pop_size)\n if pop_num_mutation < 1:\n pop_num_mutation = 1\n pop_num_lucky = int((pop_size*perc_lucky))\n if pop_num_lucky < 1:\n pop_num_lucky = 1\n total_non_children = pop_num_lucky + pop_num_elite + pop_num_mutation\n pop_num_selected = pop_size - total_non_children + pop_num_mutation\n if np.mod(pop_num_selected - pop_num_mutation,2) != 0:\n pop_num_selected += 1\n pop_num_elite -= 1\n \n mutation_gene_num = int(mutation_chance*t_steps)\n if mutation_gene_num < 2:\n mutation_gene_num = 2\n selection_num = np.array([pop_num_elite, pop_num_selected, pop_num_lucky, pop_num_mutation, mutation_gene_num])\n if dimensions == 1:\n v_max = (loc_des_x)/(dt*t_steps*ideal_time_perc/2)\n elif dimensions == 2:\n v_max = np.sqrt(loc_des_x**2 + loc_des_y**2)/(dt*t_steps*ideal_time_perc/2)\n \n acc = v_max/(dt*t_steps*ideal_time_perc/2)\n theory_max = (A_l + A_v + A_t*(1-ideal_time_perc))/(A_l + A_v + A_t)\n \n COUNT = 0\n ## First iteration\n pop = init_population(pop, dimensions, deg_steps)\n ## Calculate Generations until convergence or theory max \n for I in range(generations):\n \n pop = calculate_result(pop, acc, t_steps, dt, [], False, dimensions)\n pop = fitness(pop, weights, goals, thresh, v_max, dimensions)\n pop, best_performance = pop_selection(pop, selection_num, dimensions, angle_mutation, deg_steps)\n \n if np.mod(I,100) == 0 and I != 0:\n print('t: %d, Pop: %d, Run: %1d, Max Perf: %3.3f' % (t_steps,pop_size,KK, theory_max))\n if dimensions == 1:\n print('Gen %1d Performance %3.3f, Distance = %3.1f, Velocity = %3.3f' % (I, best_performance[0], best_performance[1][0], best_performance[1][1]))\n elif dimensions == 2:\n print('Gen %1d Performance %3.3f, Distance_x = %3.1f, Distance_y = %3.1f, Velocity = %3.3f' % (I, best_performance[0], best_performance[1][0], best_performance[1][1], best_performance[1][2]))\n# for III in range(t_steps):\n# print(pop['actions'][0,III,1]*180/(np.pi))\n# calculate_result(pop, acc, t_steps, dt, 0, True, dimensions)\n if I == generations - 1:\n print('t: %d, Pop: %d, Run: %1d, Max Perf: %3.3f' % (t_steps,pop_size,KK, theory_max))\n if dimensions == 1:\n print('Gen %1d Performance %3.3f, Distance = %3.1f, Velocity = %3.3f' % (I, best_performance[0], best_performance[1][0], best_performance[1][1]))\n elif dimensions == 2:\n print('Gen %1d Performance %3.3f, Distance_x = %3.1f, Distance_y = %3.1f, Velocity = %3.3f' % (I, best_performance[0], best_performance[1][0], best_performance[1][1], best_performance[1][2]))\n# calculate_result(pop, acc, t_steps, dt, 0, True, dimensions)\n \n if best_performance[0] >= theory_max-thresh_perf:\n COUNT += 1\n if COUNT >= 2:\n if dimensions == 1:\n print('Gen %1d Performance %3.3f, Distance = %3.1f, Velocity = %3.3f' % (I, best_performance[0], best_performance[1][0], best_performance[1][1]))\n elif dimensions == 2:\n print('Gen %1d Performance %3.3f, Distance_x = %3.1f, Distance_y = %3.1f, Velocity = %3.3f' % (I, best_performance[0], best_performance[1][0], best_performance[1][1], best_performance[1][2]))\n \n break\n \n# print(A_l, A_v, A_t)\n elapsed_time = time.time() - start_time\n gen_count[0,KK] = I\n gen_count[1,KK] = elapsed_time\n\n gen_count_stats = np.zeros((3))\n 
gen_count_stats[:2] = np.mean(gen_count,1)\n gen_count_stats[2] = np.sqrt(np.var(gen_count[0,:]))\n \n return gen_count_stats, pop, best_performance, acc, dt\n\ndef n_d_runfile(dimensions = 1, loc_des_x = 1, loc_des_y = 1, vel_des = 1, t_steps = 1, pop_size = 1, A_l = 1, A_v = 1, A_f = 1, A_t = 1, perc_elite = 1, perc_lucky = 1, perc_mutation = 1, perc_selected = 1, mutation_chance = 1, angle_mutation = 1, samples = 1, generations = 1):\n\n dt = 1.0\n ideal_time_perc = 0.8\n perc_elite, perc_lucky, perc_mutation, perc_selected = np.array([perc_elite, perc_lucky, perc_mutation, perc_selected])/(perc_elite + perc_lucky + perc_mutation + perc_selected)\n\n gen_count_stats, pop, best_performance, acc, dt = run(A_l, A_v, A_f, A_t, perc_elite, perc_lucky, perc_mutation, mutation_chance, pop_size, generations, t_steps, loc_des_x, loc_des_y, vel_des, ideal_time_perc, dt, dimensions, samples, angle_mutation)\n \n calculate_result(pop, acc, t_steps, dt, best_performance[2], True, dimensions)\n print(gen_count_stats)\n print(pop['actions'][0,:,0])\n print(pop['actions'][0,:,1]*180/(np.pi))\n return gen_count_stats\n\n\ndef fitness_ML(pop_ML):\n \n pop_ML['score'] = np.exp(-(pop_ML['results']/100))\n \n return pop_ML\n\ndef init_population_ML(pop_ML):\n \n pop_ML['actions'] = np.random.uniform(0.025,0.975,size = (np.shape(pop_ML['actions'])[0],np.shape(pop_ML['actions'])[1]))\n \n return pop_ML\n\ndef calculate_ML(pop_ML, current_gen, ML_settings):\n \n start_time = time.time()\n gen_stats = np.zeros((np.size(pop_ML['actions'],0)))\n pool = Pool(10)\n \n ML_dimensions, ML_loc_des_x, ML_loc_des_y, ML_vel_des, ML_t_steps, ML_pop_size_max, ML_samples, ML_generations = ML_settings\n \n A = []\n \n for i in range(np.size(pop_ML['actions'],0)):\n A.append((ML_dimensions,ML_loc_des_x,ML_loc_des_y,ML_vel_des,ML_t_steps,int(pop_ML['actions'][i,0]*ML_pop_size_max),pop_ML['actions'][i,1],pop_ML['actions'][i,2],pop_ML['actions'][i,3],pop_ML['actions'][i,4],pop_ML['actions'][i,5],pop_ML['actions'][i,6],pop_ML['actions'][i,7],pop_ML['actions'][i,8],pop_ML['actions'][i,9],pop_ML['actions'][i,10],ML_samples,ML_generations))\n# print(A)\n print('Gen = %d' %(current_gen))\n gen_stats = np.zeros((np.shape(A)[0]))\n# for i in range(np.shape(A)[0]):\n# dimensions, loc_des_x, loc_des_y, vel_des, t_steps, pop_size, A_l, A_v, A_f, A_t, perc_elite, perc_lucky, perc_mutation, perc_selected, mutation_chance, angle_mutation, samples, generations = A[i]\n# gen_stats[i] = n_d_runfile(dimensions, loc_des_x, loc_des_y, vel_des, t_steps, pop_size, A_l, A_v, A_f, A_t, perc_elite, perc_lucky, perc_mutation, perc_selected, mutation_chance, angle_mutation, samples, generations)\n# print(resultant_gen)\n for i, result in enumerate(pool.starmap(n_d_runfile, A)):\n print(current_gen, i, result)\n gen_stats[i] = result[0]\n \n elapsed_time = time.time() - start_time\n print(elapsed_time)\n \n pop_ML['results'] = gen_stats\n pool.close()\n print(np.median(gen_stats))\n \n return pop_ML\n\ndef pop_selection_ML(pop_ML, selection_num_ML):\n \n sorted_pop = np.argsort(pop_ML['score'])[::-1]\n \n elite_pop = sorted_pop[:selection_num_ML[0]]\n selected_pop = sorted_pop[:selection_num_ML[1]]\n if len(selected_pop) == np.shape(pop_ML['actions'])[0]:\n lucky_pop = []\n else:\n lucky_pop = np.random.choice(sorted_pop[selection_num_ML[1]:],size = selection_num_ML[2], replace = False)\n mutated_pop = np.random.choice(selected_pop, size = selection_num_ML[3], replace = False)\n selected_pop = np.setdiff1d(selected_pop,mutated_pop)\n lucky_elite = 
np.concatenate((elite_pop,lucky_pop))\n if len(lucky_elite) == 0:\n actions = []\n else:\n actions = pop_ML['actions'][lucky_elite]\n \n pop_ML = generate_children_ML(pop_ML, actions, selection_num_ML, selected_pop, mutated_pop)\n \n return pop_ML, [np.array(pop_ML['score'][sorted_pop[0]]),pop_ML['results'][sorted_pop[0]],sorted_pop[0]]\n\ndef generate_children_ML(pop_ML, actions, selection_num_ML, selected_pop, mutated_pop):\n \n mutated_actions = pop_ML['actions'][mutated_pop,:]\n mut_num = np.random.randint(1,selection_num_ML[4]+1,size = 1)[0]\n indices = np.random.randint(0,np.size(mutated_actions[0,:]), size = (np.size(mutated_actions[:,0]),mut_num))\n for i, mut_locs in enumerate(indices):\n mutated_actions[i,mut_locs] = np.random.uniform(0.025, 0.975, size = mut_num)\n \n random_selection_children_0 = np.random.randint(0,2,size = (int(len(selected_pop)/2),np.size(pop_ML['actions'][0,:])))\n random_selection_children_1 = 1 - random_selection_children_0\n \n selected_pop_0 = np.random.choice(selected_pop, size = int(len(selected_pop)/2), replace = False)\n selected_pop_1 = np.setdiff1d(selected_pop, selected_pop_0)\n \n children_actions_0 = pop_ML['actions'][selected_pop_0,:]*random_selection_children_0 + pop_ML['actions'][selected_pop_1,:]*random_selection_children_1\n children_actions_1 = pop_ML['actions'][selected_pop_1,:]*random_selection_children_0 + pop_ML['actions'][selected_pop_0,:]*random_selection_children_1\n\n pop_ML['actions'] = np.concatenate((actions,mutated_actions,children_actions_0,children_actions_1), axis = 0)\n\n return pop_ML\n\ndef run_ML(generations, ML_settings, already_started = 0, pop_ML = None):\n\n pop_size = 50\n t_steps = 11\n \n perc_elite = 0.10\n perc_lucky = 0.05\n perc_mutation = 0.20\n# mutation_chance = 0.05\n \n pop_num_elite = int(perc_elite*pop_size)\n pop_num_mutation = int(perc_mutation*pop_size)\n pop_num_lucky = int((pop_size*perc_lucky))\n total_non_children = pop_num_lucky + pop_num_elite + pop_num_mutation\n pop_num_selected = pop_size - total_non_children + pop_num_mutation\n \n if np.mod(pop_num_selected - pop_num_mutation,2) != 0:\n pop_num_elite += 1\n pop_num_selected -= 1\n \n mutation_gene_num = 1#int(mutation_chance*t_steps)\n selection_num_ML = np.array([pop_num_elite, pop_num_selected, pop_num_lucky, pop_num_mutation, mutation_gene_num])\n \n time_to_locvel_ratio = 0.99\n dist_to_vel_ration = 0.5\n \n if already_started == 0:\n print('Starting')\n pop_ML = dict({'actions': np.zeros((pop_size,t_steps)), 'results':np.zeros((pop_size)), 'score':np.zeros((pop_size))})\n pop_ML = init_population_ML(pop_ML) \n else:\n print('Already Started, but Continuing')\n \n for I in range(generations):\n \n vel_loc_weight = (pop_ML['actions'][:,1] + pop_ML['actions'][:,2])\n bad_time_weights = pop_ML['actions'][:,4] > vel_loc_weight*time_to_locvel_ratio\n pop_ML['actions'][bad_time_weights,4] = (pop_ML['actions'][bad_time_weights,1] + pop_ML['actions'][bad_time_weights,2])*time_to_locvel_ratio\n\n pop_ML = calculate_ML(pop_ML, I, ML_settings)\n pop_ML = fitness_ML(pop_ML)\n pop_ML, best_performance = pop_selection_ML(pop_ML, selection_num_ML)\n \n print('BEST PERFORMANCE AT GEN %d: %3.3f' %(I, best_performance[0]))\n \n filename = 'Population_ML_Gen_Temp.npy'\n np.save(filename, pop_ML)\n \n return pop_ML, best_performance\n\ndef start_ML_ML_optimization(generations):\n \n change_settings = 1\n already_started = 1\n load_old_file = 0\n \n ML_dimensions = 2\n ML_loc_des_x = 100\n ML_loc_des_y = 100*np.tan(60*np.pi/180)\n ML_vel_des = 0\n 
ML_generations = 1000\n ML_t_steps = 5\n ML_samples = 5\n ML_pop_size_max = 5000\n \n ML_settings = [ML_dimensions, ML_loc_des_x, ML_loc_des_y, ML_vel_des, ML_t_steps, ML_pop_size_max, ML_samples, ML_generations]\n \n if already_started == 0:\n pop_ML, best_performance = run_ML(generations, ML_settings, already_started, [])\n else:\n if load_old_file == 0:\n file_base = str(ML_dimensions) + 'D_' + str(generations) + '_Gens_time_ML_t_' + str(ML_t_steps) + '_samp_' + str(ML_samples) + '_pop_' + str(ML_pop_size_max)\n filename = file_base + '_settings_1.npy'\n pop_ML = np.load(filename).item()\n filename = file_base + '_settings_2.npy'\n ML_settings = np.load(filename).astype(int)\n ML_dimensions, ML_loc_des_x, ML_loc_des_y, ML_vel_des, ML_t_steps, ML_pop_size_max, ML_samples, ML_generations = ML_settings\n ML_settings_temp = np.load(filename) \n ML_loc_des_x, ML_loc_des_y, ML_vel_des = ML_settings_temp[1:4]\n # filename = 'Population_ML_Gen_Temp.npy'\n # pop_ML = np.load(filename).item()\n if change_settings == 1:\n ML_generations = 2000\n ML_t_steps = 10\n# ML_samples = 5\n# ML_dimensions = 2\n# ML_loc_des_y = 100*np.tan(60*np.pi/180)\n generations = 10\n# ML_pop_size_max = 5000\n else:\n file_base = '1D_200_Gens_time_ML_t_50_samp_20_pop_2000'\n filename = file_base + '_settings_1.npy'\n pop_ML = np.load(filename).item()\n \n \n ML_settings = [ML_dimensions, ML_loc_des_x, ML_loc_des_y, ML_vel_des, ML_t_steps, ML_pop_size_max, ML_samples, ML_generations]\n \n pop_ML, best_performance = run_ML(generations, ML_settings, already_started, pop_ML)\n \n filename = str(ML_dimensions) + 'D_' + str(generations) + '_Gens_time_ML_t_' + str(ML_t_steps) + '_samp_' + str(ML_samples) + '_pop_' + str(ML_pop_size_max) + '_settings_1.npy'\n np.save(filename, pop_ML)\n filename = str(ML_dimensions) + 'D_' + str(generations) + '_Gens_time_ML_t_' + str(ML_t_steps) + '_samp_' + str(ML_samples) + '_pop_' + str(ML_pop_size_max) + '_settings_2.npy'\n np.save(filename, ML_settings)\n\n return\n\nif __name__ == \"__main__\":\n \n plt.close('all')\n ### Design Values\n \n optimize_ML = 0\n generations = 20\n already_started = 1 \n \n if optimize_ML == 0:\n \n if already_started == 1:\n ML_t_steps = 5\n ML_samples = 5\n ML_pop_size_max = 5000\n ML_dimensions = 2\n \n file_base = str(ML_dimensions) + 'D_' + str(generations) + '_Gens_time_ML_t_' + str(ML_t_steps) + '_samp_' + str(ML_samples) + '_pop_' + str(ML_pop_size_max)\n# file_base = '1D_100_Gens_ML_t_50_samp_10_pop_1000'\n \n filename = file_base + '_settings_1.npy'\n pop_ML = np.load(filename).item()\n pop_size, A_l, A_v, A_f, A_t, perc_elite, perc_lucky, perc_mutation, perc_selected, mutation_chance, angle_mutation = pop_ML['actions'][0,0],pop_ML['actions'][0,1],pop_ML['actions'][0,2],pop_ML['actions'][0,3],pop_ML['actions'][0,4],pop_ML['actions'][0,5],pop_ML['actions'][0,6],pop_ML['actions'][0,7],pop_ML['actions'][0,8],pop_ML['actions'][0,9],pop_ML['actions'][0,10]\n \n filename = file_base + '_settings_2.npy'\n ML_settings = np.load(filename).astype(int)\n dimensions, loc_des_x, loc_des_y, vel_des, t_steps, pop_size_max, samples, generations = ML_settings\n ML_settings_temp = np.load(filename) \n loc_des_x, loc_des_y, vel_des = ML_settings_temp[1:4]\n pop_size = int(pop_size*pop_size_max)\n \n \n t_steps = 10\n# samples = 1\n# loc_des_y = 100*np.tan(60*np.pi/180)\n# angle_mutation = 0.05\n# dimensions = 2\n# generations = 10000\n# pop_size = 2000\n# A_l = 0.5\n# A_v = 0.5\n# A_t = (A_v+A_t)*0.99\n \n# A_f = 2\n# A_f, A_l, A_t, A_v = 
[0.5,0.01,0.426522442525838,0.7690407264472648]\n# perc_elite, perc_lucky, perc_mutation, perc_selected, mutation_chance = [0.18,0.12,0.1,0.82,0.13]\n \n \n else:\n dimensions = 2\n loc_des_x = 100\n loc_des_y = 100*np.tan(60*np.pi/180)\n vel_des = 0 \n t_steps = 5\n \n pop_size = 100\n A_l = 0.3\n A_v = 0.6\n A_f = 0\n A_t = 0.2\n \n ## Other Set Params\n perc_elite = 0.2\n perc_lucky = 0.15\n perc_mutation = 0.2\n perc_selected = 0.45\n mutation_chance = 0.4\n angle_mutation = 0.1\n \n generations = 1000\n samples = 1\n \n generation_stats = n_d_runfile(dimensions, loc_des_x, loc_des_y, vel_des, t_steps, pop_size, A_l, A_v, A_f, A_t, perc_elite, perc_lucky, perc_mutation, perc_selected, mutation_chance, angle_mutation, samples, generations)\n \n# plt.figure(1000)\n# plt.plot(pop_size,generation_stats[0],'.k')\n# \n# plt.figure(1002)\n# plt.plot(pop_size,generation_stats[2],'.r')\n# \n# plt.figure(1001)\n# plt.plot(pop_size,generation_stats[1],'.b')\n \n elif optimize_ML == 1:\n start_ML_ML_optimization(generations)\n \n \n # Optimized 20 t_step for\n# time [3.30000000e+01 2.30053663e-02 3.65106365e-02]\n# generations [2.90000000e+01 5.80129623e-02 1.33688030e-02]\n# filename = 'Population_ML_Gen_Temp.npy'\n# pop_ML = np.load(filename).item()","sub_path":"Deprecated/Orbital_Genetic_Algorithm/N_dim_ML.py","file_name":"N_dim_ML.py","file_ext":"py","file_size_in_byte":29483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
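The generate_children step in the record above builds offspring with complementary random 0/1 masks, i.e. uniform crossover. A minimal standalone NumPy sketch of that pattern; the shapes and seed are toy values chosen for illustration, not taken from the original script:

import numpy as np

rng = np.random.default_rng(0)
parent_a = rng.integers(-1, 2, size=(4, 10))    # toy 1-D action rows
parent_b = rng.integers(-1, 2, size=(4, 10))

mask = rng.integers(0, 2, size=parent_a.shape)  # one 0/1 choice per gene
child_0 = parent_a * mask + parent_b * (1 - mask)
child_1 = parent_b * mask + parent_a * (1 - mask)

# Each gene comes from exactly one parent, and the complementary masks
# conserve the genetic material across the sibling pair.
assert ((child_0 == parent_a) | (child_0 == parent_b)).all()
assert ((child_0 + child_1) == (parent_a + parent_b)).all()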
+{"seq_id":"502528828","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\n\n# Check if we can enable mixed-precision via apex.amp\ntry:\n from apex import amp\nexcept ImportError:\n raise ImportError('Use APEX for mixed precision via apex.amp')\n\n\ndef main(model_dir):\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Inference\")\n parser.add_argument(\n \"--config-file\",\n default=\"/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"--ckpt\",\n help=\"The path to the checkpoint for test, default is the latest checkpoint.\",\n default=None,\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n distributed = num_gpus > 1\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n save_dir = \"\"\n logger = setup_logger(\"maskrcnn_benchmark\", save_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(cfg)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n model = build_detection_model(cfg)\n model.to(cfg.MODEL.DEVICE)\n\n # Initialize mixed-precision if necessary\n use_mixed_precision = cfg.DTYPE == 'float16'\n amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)\n\n output_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir) # TODO change the pth\n ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt\n\n # model_dir = \"/home/zhaowangbo/modify3/salient_segmentation_fuse/salient_segmentaion_fuse/tools/model_0062500.pth\"\n\n _ = checkpointer.load(model_dir)\n\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n\n map050, map055, map060, map065, map070, map075, map080, map085, map090, map095, map = 
inference(model)\n return map050, map055, map060, map065, map070, map075, map080, map085, map090, map095, map\n\n\nif __name__ == \"__main__\":\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"8\"\n results = {}\n # model = [\"50000.pth\", \"52500.pth\", \"55000.pth\", \"57500.pth\", \"60000.pth\", \"62500.pth\", \"65000.pth\", \"67500.pth\", \"70000.pth\"]\n # model = [\"40000.pth\", \"42500.pth\", \"45000.pth\", \"47500.pth\"]\n # \"72500.pth\", \"75000.pth\"]\n model = [\"55000.pth\"]\n # model = [\"55000.pth\"]\n # model = [\"55000.pth\"]\n # model = [\"65000.pth\", \"67500.pth\", \"70000.pth\"]\n # model = [\"45000.pth\", \"47500.pth\"]\n # model = [\"00000.pth\", \"07500.pth\", \"10000.pth\", \"12500.pth\", \"15000.pth\", \"20000.pth\", \"20000.pth\"]\n # model = [\"ceshi.pth\"]\n\n for i in model:\n print(\"start %s\" % i)\n result = {\"map0.50\": None, \"map0.55\": None, \"map0.60\": None, \"map0.65\": None, \"map0.70\": None, \"map0.75\": None,\n \"map0.80\": None, \"map0.85\": None, \"map0.90\": None, \"map0.95\": None, \"map\":None}\n\n model_dir = \"/home/zhaowangbo/SCG/SCG_TIP/base14/tools/model_00\" + i\n map050, map055, map060, map065, map070, map075, map080, map085, map090, map095, map = main(model_dir)\n result[\"map0.50\"] = map050\n result[\"map0.55\"] = map055\n result[\"map0.60\"] = map060\n result[\"map0.65\"] = map065\n result[\"map0.70\"] = map070\n result[\"map0.75\"] = map075\n result[\"map0.80\"] = map080\n result[\"map0.85\"] = map085\n result[\"map0.90\"] = map090\n result[\"map0.95\"] = map095\n result[\"map\"] = map\n\n print(result)\n\n results.update({i: result})\n\n print(results)","sub_path":"tools/test_net.py","file_name":"test_net.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"351986093","text":"# https://leetcode.com/problems/single-number/description/\n\n\"\"\"\nSolution.\nComplexity analysis:\nTime: O(N) - in worst case\nMemory: O(1) - always\n\"\"\"\n\nclass Solution:\n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n single = 0\n for num in nums:\n single ^= num\n \n return single","sub_path":"Problems/leetcode/Single_Number_136.py","file_name":"Single_Number_136.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"289574194","text":"# Write a function that takes:\n# a list of unsorted_scores\n# the highest_possible_score in the game\n# and returns a sorted list of scores in less than O(nlgn) time.\n\n# Another common way to get O(n)O(n) runtime is to use counting. ↴ We can build a list score_counts where the indices represent scores and the values represent how many times the score appears. Once we have that, can we generate a sorted list of scores?\n\n# Counting is a common pattern in time-saving algorithms. It can often get you O(n)O(n) runtime, but at the expense of adding O(n) space.\n\n# The idea is to define a dictionary or list (call it e.g. counts) where the keys/indices represent the items from the input set and the values represent the number of times the item appears. In one pass through the input you can fully populate counts.\n\n# O(n) time and O(n) space, where nn is the number of scores.\n\n# If we didn't treat highest_possible_score as a constant, \n# we could call it kk and say we have O(n+k) time and O(n+k) space.\n\n\ndef sort_scores(unsorted_scores, highest_possible_score):\n\n # Sort the scores in O(n) time\n\n score_counts = [0] * (highest_possible_score + 1)\n res = []\n \n for score in unsorted_scores:\n score_counts[score] += 1\n \n for i in range(len(score_counts)-1, -1, -1):\n if score_counts[i] > 0:\n # if theres more than 1 of an element we need to loop over the amount of times it appears\n for j in range(score_counts[i]): \n res.append(i)\n \n return res","sub_path":"interview_cake/2. hash_tables_and_sets/top_scores.py","file_name":"top_scores.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"154526206","text":"from flask.ext.bootstrap import Bootstrap\nfrom flask.ext.login import LoginManager\nfrom flask.ext.mail import Mail\nfrom flask.ext.migrate import Migrate\nfrom flask.ext.moment import Moment\nfrom flask.ext.uploads import UploadSet,IMAGES, configure_uploads\nfrom flask_sqlalchemy import SQLAlchemy\n\n# 各种扩展对象\ndb = SQLAlchemy()\nmigrate = Migrate(db=db)\nbs = Bootstrap()\n\n# 发邮件扩展\nmail = Mail()\n\n# 配置LoginManager的配置\nlm = LoginManager()\nlm.login_view = 'userbp.login'\nlm.login_message = 'login required!'\nlm.session_protection = 'strong'\n\n# 上传扩展扩展对象,允许的格式为图片\nphotos = UploadSet('photos', IMAGES)\n\nmoment = Moment()\n\n# 为各种扩展绑定app\n# 使用的是事后绑定——因为app诞生的很晚,但各种扩展被依赖的很早\ndef init_extentions(app):\n\n # 为扩展对象绑定app\n db.init_app(app)\n migrate.init_app(app)\n bs.init_app(app)\n\n # 为mail绑定app\n mail.init_app(app)\n moment.init_app(app)\n lm.init_app(app)\n\n # 绑定app和US对象photos\n configure_uploads(app, photos)","sub_path":"app/extentions.py","file_name":"extentions.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"27528057","text":"from urllib import request\n\n\nurlst = [ \"https://www.yandex.com/\",\n \"http://www.baidu.com\",\n \"http://www.1688.com\",\n \"https://www.zhihu.com/question/20271508\"\n ]\nuf = [2]\n#page = request.Request( urlst[3] )\n#page.add_header( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36' )\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'} \npage = request.Request( urlst[3], headers = headers )\n\n#function urlopen accept real url or request instance\n#the request module make you visit web site like a browser\n#page is a request instance\nopenre = request.urlopen( page )\ndata = openre.read()\ntry:\n data = data.decode('UTF-8')\n Dco = data.encode( encoding = 'gbk', errors = 'backslashreplace' ) \nexcept Exception as e:\n print(e)\n Dco = 'Error'\nF = open( 'recordt.txt', 'w' )\n#UnicodeEncodeError:gbk codec can’t encode character ...\n# to avoid the above case ,use the function encode with parameter errors = 'backslashreplace'\n#print( Dco )\nwith open( 'zhihu.html', 'wb' ) as f:\n f.write(str.encode(data))\nF.write( str(Dco) )\nF.close()\n","sub_path":"JN_11.py","file_name":"JN_11.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"48535666","text":"\"\"\"\n1. Отсортируйте по убыванию методом \"пузырька\" одномерный целочисленный массив,\nзаданный случайными числами на промежутке [-100; 100). Выведите на экран\nисходный и отсортированный массивы. Сортировка должна быть реализована в\nвиде функции. Обязательно доработайте алгоритм (сделайте его умнее).\n\nИдея доработки: если за проход по списку не совершается ни одной сортировки,\nто завершение\nОбязательно сделайте замеры времени обеих реализаций\nи обосновать дала ли оптимизация эффективность\n\nПодсказка: обратите внимание, сортируем не по возрастанию, как в примере,\nа по убыванию\n\"\"\"\n\nimport random\nfrom timeit import timeit\n\n\ndef bubble_sort_reverse_1(my_list):\n for i in range(len(my_list) - 1):\n for j in range(len(my_list) - 1 - i):\n if my_list[j] < my_list[j + 1]:\n my_list[j], my_list[j + 1] = my_list[j + 1], my_list[j]\n return my_list\n\n\nsorted_list = [12, 11, 10, 9, 8, 5, 3, 2, 1, 0]\nnot_sorted_list = [random.randint(-100, 100) for _ in range(10)]\nprint(bubble_sort_reverse_1(sorted_list))\n\nprint(timeit(\"bubble_sort_reverse_1(sorted_list)\",\n setup=\"from __main__ import bubble_sort_reverse_1, sorted_list\", number=50))\n\n\n# не смог придумать как оптимизировать, чтобы после одного прохода, если не меняется, то вернуть исходный список(\n","sub_path":"Урок 7. Практическое задание/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"574504112","text":"from multilinguist import Multilinguist\nimport math\n\nclass MathGenius(Multilinguist):\n\n def report_total(self, numbers):\n total = sum(numbers)\n translation = self.say_in_local_language(\"The total is {}\".format(total))\n return translation\n\n def floor_of(self, number):\n floor = math.floor(number)\n translation = self.say_in_local_language(\"The largest integer <= {} is {}\".format(number, floor))\n return translation\n\n def is_Prime(self, number):\n if number > 1:\n for i in range(2, number):\n if (number % i) == 0:\n return False\n else:\n return True\n else:\n return False\n\n def is_it_Prime(self, number):\n isPrime = self.is_Prime(number)\n answer = None\n if isPrime:\n answer = \"{} is a prime number.\".format(number)\n else:\n answer = \"{} is not a prime number.\".format(number)\n translated_answer = self.say_in_local_language(answer)\n return translated_answer\n\n# mg = MathGenius()\n# # 2 is \"ODD\" - it's the only even number that is a prime number\n# print(mg.isPrime(10))\n","sub_path":"math_genius.py","file_name":"math_genius.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"233798633","text":"class Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n '''\n BFS\n '''\n if not grid or not grid[0]: return -1\n \n queue = collections.deque()\n fresh_orange = 0\n row, col = len(grid), len(grid[0])\n \n for r in range(row):\n for c in range(col):\n if grid[r][c] == 2:\n queue.append((r,c))\n elif grid[r][c] == 1:\n fresh_orange += 1\n \n ans = 0 \n while queue and fresh_orange:\n ans += 1\n for _ in range(len(queue)): # NOTE\n r, c = queue.popleft()\n directions = [(r+1, c), (r-1, c), (r, c+1), (r, c-1)]\n for x, y in directions:\n if 0 <= x < row and 0 <= y < col and grid[x][y] == 1:\n grid[x][y] = 2\n fresh_orange -= 1\n queue.append((x, y))\n \n return ans if fresh_orange == 0 else -1\n \n # TC: O(r*c)\n \n # SC: O(r*c)\n \n # NOTE: the queue.append() operation wont affect the len(queue) of current level\n \n # ref: https://leetcode.com/problems/rotting-oranges/discuss/563686/\n","sub_path":"994_RottingOranges/994_RottingOranges.py","file_name":"994_RottingOranges.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"511143417","text":"#!/usr/bin/env python\n#\n# Gianluigi Cosari - gianluigi@cosari.it\n# IAA Ver 1.0 July 2017\n# Licensed under MIT\n\n#Standard and PyPI libraries\nimport os, sys, time, signal, logging, ConfigParser, MySQLdb, getpass, telegram\nimport daemon\nimport daemon.pidfile\nfrom logging.handlers import TimedRotatingFileHandler\nfrom threading import Thread\nfrom Queue import Queue\n\n#Application specific libraries\nfrom iaalib.iaalib import MQTT_listener, OneWire_scanner, Telegram_messenger, Telegram_updater, Master_timer, Wiegand_reader, I2C_handler\nfrom iaalib.qprocessor import Queue_processor\nfrom iaalib.fsm import IAAmodel\n\n\n#Retrieves an item from configuration file\ndef get_config_item(section, name, default):\n\ttry:\n\t\tvalue = config.get(section, name)\n\texcept:\n\t\tvalue = default\n\treturn value\n\t\n\n#Application name is derived from the script file name\nappname = os.path.splitext(__file__)[0]\n\n#Reading configuration file\nconfig = ConfigParser.RawConfigParser()\nconfig.read(['/etc/' + appname + '.conf', appname + '.conf'])\npid_file = get_config_item('daemon', 'pid_file', '/var/run/' + appname + '.pid')\n\n#Get current user\ncuruser = getpass.getuser()\n\n#Initializing logging facility\nlogger = logging.getLogger(appname)\nlogFile = get_config_item('logging', 'log_file', appname + '.log')\nlogLevel = int(get_config_item('logging', 'log_level', logging.INFO))\nbkpCount = int(get_config_item('logging', 'backup_count', 120))\nlogger.setLevel(logLevel)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nfh = TimedRotatingFileHandler(logFile, when = 'midnight', backupCount = bkpCount)\nfh.suffix = '%Y_%m_%d'\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.info('IAA daemon starting as user \"%s\"' % curuser)\n\n#Checking for stale pid file\nif os.path.isfile(pid_file):\n\tlogger.info('Found stale pid file')\n\tos.remove(pid_file)\n\tlogger.info('Stale pid file removed, proceeding with daemon startup')\n\n#Retrieving daemon parameters\nalive_notification_time = get_config_item('daemon', 'alive_msg_at', '00:00')\n\n#Retrieving MQTT parameters\nbrokerHost = get_config_item('mqtt', 'hostname', 'localhost')\nbrokerPort = int(get_config_item('mqtt', 'port', 1883))\nbrokerKeepalive = int(get_config_item('mqtt', 'keepalive', 60))\nmqttParms = (brokerHost, brokerPort, brokerKeepalive,)\nmqttClient = get_config_item('mqtt', 'client_id', 'mqttclient')\n\n#Retrieving 1-wire parameters\nw1_max_errors = int(get_config_item('onewire', 'max_errors', 5))\nw1_error_period = int(get_config_item('onewire', 'error_period', 20))\nw1_suppresssion_period = int(get_config_item('onewire', 'suppression_period', 10))\nw1_Parms = (w1_max_errors, w1_error_period, w1_suppresssion_period,)\n\n#Retrieving Wiegand RFID reader parameters\nwiegand_data0 = int(get_config_item('wiegand', 'data0_gpio', 17))\nwiegand_data1 = int(get_config_item('wiegand', 'data1_gpio', 18))\n\n#MySql connection\ndbhost = get_config_item('database', 'server', 'localhost')\ndbname = get_config_item('database', 'db_name', 'iaa')\ndbuser = get_config_item('database', 'user', 'root')\ndbpass = get_config_item('database', 'passwd', '')\ndbParms = (dbhost, dbuser, dbpass, dbname)\ndb = MySQLdb.connect(*dbParms)\ncur = db.cursor()\n\n#Retrieving Telegram active users\nrows_count = cur.execute(\"SELECT chat_id FROM users WHERE NOT enabled = 0\")\n\nif rows_count:\n\ttelegram_users = cur.fetchall()\n\tlogger.debug(\"Successfully retrieved Telegram active 
users\")\nelse:\n\tlogger.warning(\"No Telegram active users found. At least one active user must be defined.\")\n\n#Retrieving MQTT topics\nrows_count = cur.execute(\"SELECT topic FROM zones WHERE technology = 'MQ' AND topic IS NOT NULL\")\n\nif rows_count:\n\tmqttTopics = cur.fetchall()\n\tlogger.debug(\"Successfully retrieved MQTT topics\")\nelse:\n\tlogger.warning(\"No MQTT topics found. At least one MQTT topic should be defined.\")\n\n#Retrieving 1-wire nodes\nrows_count = cur.execute(\"SELECT id, name, type, node, CONCAT(door, ',', tamper) AS iostate FROM zones WHERE technology = 'W1'\")\n\nif rows_count:\n\tw1_nodes = cur.fetchall()\n\tlogger.debug(\"Successfully retrieved 1-wire nodes\")\nelse:\n\tlogger.info(\"No 1-wire nodes found.\")\n\n#Closing database resources\ncur.close()\ndb.close()\n\n#Initializing Telegram bot\nbot_token = get_config_item('telegram', 'bot_key', '')\ntry:\n\ttbot = telegram.Bot(token = bot_token)\nexcept Exception as e:\n\tprint(\"Telegram bot initialization failed with error: %s\" %str(e))\n\tquit()\n\t\n#Defining MQTT event queue\nmqtt_q = Queue(maxsize=0)\n\n#Defining Telegram notifications queue\ntelegram_q = Queue(maxsize=0)\n\n#Defining Event Log queue\nevent_q = Queue(maxsize=0)\n\n#Defining I2C write queue\ni2c_q = Queue(maxsize=0)\n\n#Defining system logic model (FSM)\nlogic_model = IAAmodel(logger, telegram_q, i2c_q)\n\n#----- Instancing thread classes -----\n\n#MQTT listener\nmqtt_listener_cls = MQTT_listener(mqttClient, mqttParms, mqttTopics, mqtt_q, logger)\n\n#1Wire scanner\now_scanner_cls = OneWire_scanner(telegram_q, mqtt_q, dbParms, logger, w1_Parms, w1_nodes)\n\n#Telegram messenger\ntelegram_messenger_cls = Telegram_messenger(telegram_q, telegram_users, tbot, logger)\n\n#Telegram updater\ntelegram_updater_cls = Telegram_updater(tbot, logger, logic_model)\n\n#Master timer\nmaster_timer_cls = Master_timer(alive_notification_time, telegram_q, dbParms, logger)\n\n#Wiegand RFID reader\nwiegand_reader_cls = Wiegand_reader(logger)\n\n#I/O module I2C handler\ni2c_handler_cls = I2C_handler(logger, i2c_q)\n\n#Main Queue processor\nmain_q_processor_cls = Queue_processor(mqtt_q, telegram_q, dbParms, logger, logic_model)\n\n\ndef do_main():\n\tglobal mqtt_listener_thread, ow_scanner_thread, telegram_messenger_thread, main_queue_thread, telegram_updater_thread, master_timer_thread, wiegand_reader_thread, i2c_handler_thread\n\n\t#Starting MQTT listener thread\n\tmqtt_listener_thread = Thread(target = mqtt_listener_cls.run)\n\tmqtt_listener_thread.start()\n\t\n\t#Starting 1Wire scanner thread\n\tow_scanner_thread = Thread(target = ow_scanner_cls.run)\n\tow_scanner_thread.start()\n\t\n\t#Starting Telegram messenger thread\n\ttelegram_messenger_thread = Thread(target = telegram_messenger_cls.run)\n\ttelegram_messenger_thread.start()\n\t\n\t#Starting Telegram updater thread\n\ttelegram_updater_thread = Thread(target = telegram_updater_cls.run)\n\ttelegram_updater_thread.start()\n\t\n\t#Starting Master timer thread\n\tmaster_timer_thread = Thread(target = master_timer_cls.run)\n\tmaster_timer_thread.start()\n\t\n\t#Starting Wiegand RFID reader thread\n\twiegand_reader_thread = Thread(target = wiegand_reader_cls.run)\n\twiegand_reader_thread.start()\n\t\n\t#Starting I/O module I2C handler thread\n\ti2c_handler_thread = Thread(target = i2c_handler_cls.run)\n\ti2c_handler_thread.start()\n\t\n\t#Starting Queue processor thread\n\tmain_queue_thread = Thread(target = main_q_processor_cls.run)\n\tmain_queue_thread.start()\n\n\ttelegram_q.put('IAA sistema 
inizializzato!')\n\t\n\twhile(True):\n\t\ttime.sleep(300)\n\t\tlogger.info('IAA daemon main thread tick')\n\n\ndef graceful_stop(signum, frame):\n\tlogger.info('Termination sequence invoked on signal %s' % signum)\n\t\n\tlogger.debug('Graceful termination request for MQTT listener thread')\n\tmqtt_listener_cls.terminate()\n\tmqtt_listener_thread.join()\n\tlogger.info('MQTT listener thread terminated')\n\t\n\tlogger.debug('Graceful termination request for 1Wire scanner thread')\n\tow_scanner_cls.terminate()\n\tow_scanner_thread.join()\n\tlogger.info('1Wire scanner thread terminated')\n\t\n\tlogger.debug('Injecting exit token into Telegram queue')\n\ttelegram_q.put(None)\n\ttelegram_messenger_thread.join()\n\tlogger.info('Telegram messenger thread terminated')\n\t\n\tlogger.debug('Graceful termination request for Telegram updater thread')\n\ttelegram_updater_cls.terminate()\n\ttelegram_updater_thread.join()\n\tlogger.info('Telegram updater thread terminated')\n\t\n\tlogger.debug('Graceful termination request for Master timer thread')\n\tmaster_timer_cls.terminate()\n\tmaster_timer_thread.join()\n\tlogger.info('Master timer thread terminated')\n\t\n\tlogger.debug('Graceful termination request for Wiegand RFID reader thread')\n\twiegand_reader_cls.terminate()\n\twiegand_reader_thread.join()\n\tlogger.info('Wiegand RFID reader thread terminated')\n\t\n\tlogger.debug('Graceful termination request for I/O module I2C handler thread')\n\ti2c_handler_cls.terminate()\n\ti2c_handler_thread.join()\n\tlogger.info('I/O module I2C handler thread terminated')\n\t\n\tlogger.debug('Injecting exit token into maint queue')\n\tmqtt_q.put(None)\n\tmain_queue_thread.join()\n\tlogger.info('Queue processor thread terminated')\n\t\n\ttime.sleep(2)\n\tlogger.info('Terminating main daemon context')\n\tcontext.terminate(signum, frame)\n\n\ndef runDaemon():\n\tglobal context\n\n\tcontext = daemon.DaemonContext(\n\t\tworking_directory=os.getcwd(),\n\t\tstdout=open(os.path.join(os.getcwd(), \"logs\", \"STDOUT\"), 'w+'),\n\t\tstderr=open(os.path.join(os.getcwd(), \"logs\", \"STDERR\"), 'w+'),\n\t\tumask=0o002,\n\t\tpidfile = daemon.pidfile.PIDLockFile(pid_file),\n\t\t\n\t\tfiles_preserve = [\n\t\t\tfh.stream,\n\t\t],\n\t)\n\n\tcontext.signal_map = {\n\t\tsignal.SIGTERM: graceful_stop,\n\t\tsignal.SIGHUP: 'terminate',\n\t}\n\n\twith context:\n\t\tdo_main()\n\n\nif __name__ == \"__main__\":\n\trunDaemon()\n","sub_path":"iaa.py","file_name":"iaa.py","file_ext":"py","file_size_in_byte":9001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
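graceful_stop above shuts queue consumers down by injecting a None "exit token" rather than killing the thread. A minimal standalone sketch of that sentinel pattern:

import queue
import threading

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        if item is None:      # the sentinel: stop consuming and exit
            break
        print('processing', item)

t = threading.Thread(target=worker)
t.start()
q.put('message')
q.put(None)                   # inject the exit token, as graceful_stop does
t.join()                      # returns once the worker has seen the sentinel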
+{"seq_id":"459024390","text":"import os,sys,django,openpyxl,click,MySQLdb\r\nsys.path.append(\"C:\\PythonCourse\\onlineproject\\onlineproject\")\r\nos.environ[\"DJANGO_SETTINGS_MODULE\"]= \"onlineproject.settings\"\r\ndjango.setup()\r\nfrom onlineapp.models import College,Student,MockTest1\r\nfrom onlineproject.settings import DATABASES\r\n@click.group()\r\ndef cli():\r\n pass\r\n@cli.command('createdb',short_help='creates database')\r\ndef createdb():\r\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"sush131097\")\r\n cur = db.cursor()\r\n cur.execute('create database if not exists '+DATABASES['default']['NAME'])\r\n os.system(\"python manage.py makemigrations\")\r\n os.system(\"python manage.py migrate\")\r\n\r\n\r\n@cli.command('dropdb',short_help='drop database')\r\ndef dropdb():\r\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"sush131097\")\r\n cur = db.cursor()\r\n cur.execute('drop database if exists '+DATABASES['default']['NAME'])\r\n\r\n\r\n@cli.command('populatedb',short_help='populates database')\r\n@click.argument('studentsfile')\r\n@click.argument('marksfile')\r\ndef populatedb(studentsfile,marksfile):\r\n def loadColleges():\r\n wb = openpyxl.load_workbook(studentsfile)\r\n ws = wb['Colleges']\r\n i = -1\r\n for row in ws.rows:\r\n if (i == -1):\r\n i = i + 1\r\n continue\r\n fields = [i]\r\n for cell in row:\r\n fields.append(cell.value)\r\n c = College(name=fields[1], acronym=fields[2], location=fields[3], contact=fields[4])\r\n c.save()\r\n\r\n def loadStudents():\r\n wb = openpyxl.load_workbook(studentsfile)\r\n ws = wb['Current']\r\n i = -1\r\n for row in ws.rows:\r\n if (i == -1):\r\n i = i + 1\r\n continue\r\n fields = [i]\r\n for cell in row:\r\n fields.append(cell.value)\r\n c = College.objects.get(acronym=fields[2])\r\n # print(fields)\r\n d = Student(name=fields[1], college=c, email=fields[3], db_folder=fields[4], dob=\"1997-11-21\",\r\n dropped_out=0)\r\n d.save()\r\n\r\n\r\n\r\n def loadMarks():\r\n wb = openpyxl.load_workbook(marksfile)\r\n ws = wb['Sheet']\r\n i = -1\r\n for row in ws.rows:\r\n if (i == -1):\r\n i = i + 1\r\n continue\r\n fields = [i]\r\n for cell in row:\r\n fields.append(cell.value)\r\n student = fields[1][7:]\r\n dbname = student[student.find('_') + 1:student.find('_') + 1 + student[student.find('_') + 1:].find('_')]\r\n #print(dbname)\r\n s = Student.objects.get(db_folder=dbname)\r\n #print(s)\r\n total_marks = int(fields[2]) + int(fields[3]) + int(fields[4]) + int(fields[5])\r\n #print(total_marks)\r\n d = MockTest1(student=s, problem1=fields[2], problem2=fields[3], problem3=fields[4], problem4=fields[5],\r\n total=total_marks)\r\n d.save()\r\n\r\n loadColleges()\r\n loadStudents()\r\n loadMarks()\r\n\r\n#loadColleges()\r\n\r\n#loadStudents()\r\n#loadMarks()\r\n@cli.command('cleardata',short_help='clears data in database')\r\ndef cleardata():\r\n College.objects.all().delete()\r\n Student.objects.all().delete()\r\n MockTest1.objects.all().delete()\r\ncli()","sub_path":"Apps Course/onlineproject/onlinedb.py","file_name":"onlinedb.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"211589440","text":"import glob\nimport operator\n\n# 조사할 디렉토리 경로\nROOT_DIR = './'\n\n# 조사할 확장자 목록\nextensions = [\n '*.py',\n '*.java',\n '*.py',\n '*.js',\n '*.cs',\n '*.css',\n '*.Rmd'\n]\n\n# 무시할 디렉토리 경로 목록\nignore_paths = ['node_modules', 'Creeper-Seeker','3D-Models','AdGuard-Filter','Build-Archive','DB','public','build','.git','ftc_app_1920_New','HawksNestWebsocketAPI','i_want_go_home','PixelLabyrinth','Poster-Dark','yukinosaika.github.io']\n\n\n\n\n# 무시할 파일 목록\nignore_files = [\n 'NanumGothic.css',\n 'Designer.cs',\n '*.Designer.cs'\n]\n\ntotal_line_count = 0\ntotal_file_count = 0\nfiles_grabbed = []\n\n\n# dictionary 를 value 기준으로 정렬된 tuple 로 변환\ndef dict_to_sorted_by_val(tmp_dict, reverse=False):\n return sorted(tmp_dict.items(), key=operator.itemgetter(1), reverse=reverse)\n\n\n# 카운트 함수\ndef start_count():\n try:\n global total_line_count, total_file_count\n line_count_dict = dict()\n extension_count_dict = dict.fromkeys(extensions, 0)\n\n # 설정한 확장자가 포함된 파일 리스트 생성\n [files_grabbed.extend(glob.glob(f'{ROOT_DIR}/**/{extension}', recursive=True)) for extension in extensions]\n\n # 파일별로 라인수, 확장자별 갯수, 라인총합, 확장자별 갯수 총합 구함\n for file_name_with_path in files_grabbed:\n file_name = file_name_with_path.split('/')[-1]\n ext = file_name.split('.')[-1]\n if file_name in ignore_files:\n continue\n\n is_ignored = False\n for ignore_path in ignore_paths:\n if file_name_with_path.find(ignore_path) != -1:\n is_ignored = True\n break\n\n if is_ignored:\n continue\n\n try:\n extension_count_dict['*.' + ext] += 1\n\n\n line_count = sum(1 for line in open(file_name_with_path, encoding='ISO-8859-1'))\n line_count_dict[file_name_with_path] = line_count\n except:\n pass\n\n total_line_count += line_count\n total_file_count += 1\n print(total_line_count)\n\n # reverse=True 면 value 기준으로 내림차순 정렬\n sorted_line_count = dict_to_sorted_by_val(line_count_dict, reverse=True)\n sorted_file_count = dict_to_sorted_by_val(extension_count_dict, reverse=True)\n return sorted_line_count, sorted_file_count\n except:\n pass\n\n\n# 카운트 함수 실행\nline_count, file_count = start_count()\n\n# 출력\nfor result in line_count:\n file = result[0]\n count = result[1]\n print('{:>4} {:<0}'.format(count, file))\n\nprint('\\n지정한 확장자별 파일 개수')\nfor result in file_count:\n file = result[0]\n count = result[1]\n print('{:<7} {:>3} 개'.format(file, count))\n\nprint(f'\\n프로젝트 전체 파일 수: {total_file_count} 개')\nprint(f'프로젝트 전체 코드 라인 수: {total_line_count} 줄\\n')","sub_path":"linecount.py","file_name":"linecount.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"410765674","text":"import json\nimport os\nimport numpy as np\n\nvalidation_data = open('../data/validation.json')\ndata = json.load(validation_data)\nvaildation_labels = data['annotations']\nvalidation_data.close()\n\nf = open(\"result.txt\", \"r\")\nlines = f.read().split('\\n')\nf.close()\nmicro_precision_n = 0\nmicro_precision_d = 0\nmicro_recall_d = 0\nfor i in range(1,len(lines)-1):\n split_line = lines[i].split(',')\n image_id = int(split_line[0])\n y_pred = np.sort(np.array(np.fromstring(split_line[1], sep=' ')).astype(int))\n y_true = np.sort(np.array(vaildation_labels[image_id-1]['labelId']).astype(int))\n \n #True positives = predicted and true\n tp = len(np.intersect1d(y_true, y_pred))\n #False positives = predicted but not true\n fp = len(y_pred) - tp \n #False negative = true but not predicted\n fn = len(y_true) - tp\n \n micro_precision_n += tp\n micro_precision_d += tp + fp\n #micro_recall_n += tp\n micro_recall_d += tp + fn\n \nprec = micro_precision_n/micro_precision_d\nrecall = micro_precision_n/micro_recall_d\n\naccuracy = 2*prec*recall/(prec+recall)\nprint('Validation accuracy: ' + str(accuracy))\n","sub_path":"report2/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"63807543","text":"# -*- coding:utf8 -*-\nfrom __future__ import print_function\n\nimport random\nimport os\nimport tensorflow as tf\nimport numpy as np\nfrom collections import defaultdict\n\nfrom utils.LogHandler import LogHandler\nfrom utils.utils import load_train_valid_labels, read_features, batch_iter, valid_iter, write_in_file\n\nclass HALF_SP(object):\n\n def __init__(self, learning_rate, batch_size, neg_ratio, gamma, eta\n , n_input, n_out, n_hidden, n_layer, type_model, is_valid\n , device, files, log_file):\n if os.path.exists('log/'+log_file+'.log'):\n os.remove('log/'+log_file+'.log')\n self.logger = LogHandler(log_file)\n\n self.device = device\n\n self.type_model = type_model\n\n # Parameters\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.neg_ratio = neg_ratio\n self.valid = is_valid\n self.valid_prop = .9 if self.valid else 1.\n self.valid_sample_size = 10\n\n self.gamma = gamma\n self.eta = eta\n\n self.cur_epoch = 1\n\n # Network Parameters\n self.n_hidden = n_hidden if type_model=='mlp' else n_input # number of neurons in hidden layer\n self.n_input = n_input # size of node embeddings\n self.n_out = n_out # hashing code\n self.n_layer = n_layer # number of layer\n\n # Set Train Data\n if not isinstance(files, list) and len(files)<3:\n self.logger.info('The alogrihtm needs inputs: feature-src, feature-end, identity-linkage')\n return\n\n # tf Graph input\n self.lookup = defaultdict(dict)\n self.look_back = defaultdict(list)\n self._read_train_dat(files) # features from source, features from end, label file\n self.valid_sample_size = min(min(self.valid_sample_size, len(self.look_back['src'])-1)\n , len(self.look_back['end'])-1)\n\n # TF Graph Building\n self.sess = tf.Session()\n cur_seed = random.getrandbits(32)\n initializer = tf.contrib.layers.xavier_initializer(uniform=False, seed=cur_seed)\n with tf.device(self.device):\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n self._init_weights()\n self.build_graph(type_model)\n self.build_valid_graph(type_model)\n self.sess.run(tf.global_variables_initializer())\n\n def _read_train_dat(self, files):\n self.F, self.lookup['src'], self.look_back['src'] \\\n = read_features(files['feat-src'])\n self.G, self.lookup['end'], self.look_back['end'] = read_features(files['feat-end'])\n self.L = load_train_valid_labels(files['linkage'], self.lookup, self.valid_prop)\n\n def _init_weights(self):\n # Store layers weight & bias\n self.weights = dict()\n self.biases = dict()\n if self.type_model=='mlp':\n # inputs\n self.weights['h0_src'] = tf.Variable(tf.random_normal([self.n_input, self.n_hidden]))\n self.weights['h0_end'] = tf.Variable(tf.random_normal([self.n_input, self.n_hidden]))\n self.biases['b0_src'] = tf.Variable(tf.zeros([self.n_hidden]))\n self.biases['b0_end'] = tf.Variable(tf.zeros([self.n_hidden]))\n # hidden\n for i in range(1,self.n_layer):\n self.weights['h{}'.format(i)] = tf.Variable(tf.random_normal([self.n_hidden, self.n_hidden]))\n self.biases['b{}'.format(i)] = tf.Variable(tf.zeros([self.n_hidden]))\n # outputs\n self.weights['out'] = tf.Variable(tf.random_normal([self.n_hidden, self.n_out]))\n self.biases['b_out'] = tf.Variable(tf.zeros([self.n_out]))\n\n def build_mlp_code_graph(self, inputs, tag):\n\n # Input layer\n layer = tf.nn.sigmoid(tf.add(tf.matmul(tf.reshape(inputs,[-1,self.n_input]), self.weights['h0_'+tag])\n , self.biases['b0_'+tag]))\n for i in range(1,self.n_layer):\n layer = tf.nn.sigmoid(tf.add(tf.matmul(layer, 
self.weights['h{}'.format(i)])\n , self.biases['b{}'.format(i)]))\n # Output fully connected layer with a neuron\n code = tf.nn.tanh(tf.matmul(layer, self.weights['out']) + self.biases['b_out'])\n\n return code\n\n def build_lin_code_graph(self, inputs, tag):\n\n # Output fully layer with a neuron\n code = tf.nn.tanh(tf.matmul(tf.reshape(inputs,[-1,self.n_input]), self.weights['out']) + self.biases['b_out'])\n\n return code\n\n def build_train_graph(self, src_tag, end_tag, code_graph):\n\n PF = code_graph(self.inputs_pos[src_tag], src_tag) # batch_size*n_out\n PG = code_graph(self.inputs_pos[end_tag], end_tag) # batch_size*n_out\n NF = tf.reshape(\n code_graph(self.inputs_neg[src_tag], src_tag)\n , [-1, self.neg_ratio, self.n_out]\n ) # batch_size*neg_ratio*n_out\n NG = tf.reshape(\n code_graph(self.inputs_neg[end_tag], end_tag)\n , [-1, self.neg_ratio, self.n_out]\n ) # batch_size*neg_ratio*n_out\n B = tf.sign(PF+PG) # batch_size*n_out\n # self.ph['B'] = tf.sign(self.ph['F']+self.ph['G']) # batch_size*n_out\n\n # train loss\n term1_first = tf.log(tf.nn.sigmoid(tf.reduce_sum(tf.multiply(PF, PG),axis=1)))\n term1_second = tf.reduce_sum(tf.log(1-tf.nn.sigmoid(tf.reduce_sum(tf.multiply(NF, NG),axis=2))),axis=1)\n term1 = -tf.reduce_sum(term1_first+term1_second)\n term2 = tf.reduce_sum(tf.pow((B-PF),2))+tf.reduce_sum(tf.pow((B-PG),2))\n term3 = tf.reduce_sum(tf.pow(PF,2)+tf.reduce_sum(tf.pow(NF,2),axis=1))+tf.reduce_sum(tf.pow(PG,2)+tf.reduce_sum(tf.pow(NG,2),axis=1))\n # term1 = -tf.reduce_sum(tf.multiply(self.ph['S'], theta)-tf.log(1+tf.exp(theta)))\n # term2 = tf.reduce_sum(tf.norm(self.ph['B']-self.ph['F'],axis=1))+tf.reduce_sum(tf.norm(self.ph['B']-self.ph['G'],axis=1))\n # term3 = tf.reduce_sum(tf.norm(self.ph['F'],axis=1))+tf.reduce_sum(tf.norm(self.ph['G'],axis=1))\n self.term1 = term1\n self.term2 = term2\n self.term3 = term3\n\n return (term1+self.gamma*term2+self.eta*term3)/self.cur_batch_size\n\n def build_graph(self, type_code_graph):\n self.cur_batch_size = tf.placeholder('float32', name='batch_size')\n\n self.inputs_pos = {\n 'src': tf.placeholder('float32', [None, self.n_input]),\n 'end': tf.placeholder('float32', [None, self.n_input])\n }\n self.inputs_neg = {\n 'src': tf.placeholder('float32', [None, self.neg_ratio, self.n_input]),\n 'end': tf.placeholder('float32', [None, self.neg_ratio, self.n_input])\n }\n\n if type_code_graph=='lin':\n code_graph = self.build_lin_code_graph\n elif type_code_graph=='mlp':\n code_graph = self.build_mlp_code_graph\n\n self.loss = (self.build_train_graph('src', 'end', code_graph) \n + self.build_train_graph('end', 'src', code_graph))/2.\n\n optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.train_op = optimizer.minimize(self.loss)\n\n def build_valid_graph(self, type_code_graph):\n\n # validation\n self.inputs_val = {\n 'src': tf.placeholder('float32', [None, self.valid_sample_size, self.n_input]),\n 'end': tf.placeholder('float32', [None, self.valid_sample_size, self.n_input])\n }\n\n if type_code_graph=='lin':\n code_graph = self.build_lin_code_graph\n elif type_code_graph=='mlp':\n code_graph = self.build_mlp_code_graph\n\n valids = {\n 'src': tf.reshape(\n code_graph(self.inputs_val['src'], 'src')\n , [-1, self.valid_sample_size, self.n_out]\n ), # batch_size*neg_ratio*n_out\n 'end': tf.reshape(\n code_graph(self.inputs_val['end'], 'end')\n , [-1, self.valid_sample_size, self.n_out]\n ) # batch_size*neg_ratio*n_out \n }\n\n # self.dot_dist = tf.reduce_sum(tf.multiply(valid_f, valid_g),axis=2)\n self.hamming_dist = 
-tf.reduce_sum(\n            tf.clip_by_value(tf.sign(tf.multiply(valids['src'],valids['end'])),-1.,0.)\n            , axis=2\n        )\n\n    def train_one_epoch(self):\n        sum_loss = 0.0\n        mrr = 0.0\n        valid_size = 0\n\n        # train process\n        # print 'start training...'\n        batches = batch_iter(self.L, self.batch_size, self.neg_ratio\\\n                    , self.lookup, 'src', 'end')\n\n        batch_id = 0\n        for batch in batches:\n            # training the process from source network to end network\n            pos,neg = batch\n            if not len(pos['src'])==len(pos['end']) or not len(neg['src'])==len(neg['end']):\n                self.logger.info('The input label file goes wrong as the file format.')\n                continue\n            batch_size = len(pos['src'])\n            feed_dict = {\n                self.inputs_pos['src']:self.F[pos['src'],:],\n                self.inputs_pos['end']:self.G[pos['end'],:],\n                self.inputs_neg['src']:self.F[neg['src'],:],\n                self.inputs_neg['end']:self.G[neg['end'],:],\n                self.cur_batch_size:batch_size\n            }\n            _, cur_loss = self.sess.run([self.train_op, self.loss],feed_dict)\n\n            sum_loss += cur_loss\n            batch_id += 1\n\n        if self.valid:\n            # valid process\n            valid = valid_iter(self.L, self.valid_sample_size, self.lookup, 'src', 'end')\n            # print valid_f,valid_g\n            if not len(valid['src'])==len(valid['end']):\n                self.logger.info('The input label file goes wrong as the file format.')\n                return\n            valid_size = len(valid['src'])\n            feed_dict = {\n                self.inputs_val['src']:self.F[valid['src'],:],\n                self.inputs_val['end']:self.G[valid['end'],:],\n            }\n            # valid_dist = self.sess.run(self.dot_dist,feed_dict)\n            valid_dist = self.sess.run(self.hamming_dist,feed_dict)\n            for i in range(valid_size):\n                fst_dist = valid_dist[i][0]\n                pos = 1\n                for k in range(1,len(valid_dist[i])):\n                    if fst_dist>=valid_dist[i][k]:\n                        pos+=1\n                # print pos\n                # self.logger.info('dist:{},pos:{}'.format(fst_dist,pos))\n                # print valid_dist[i]\n                mrr += 1./pos\n            self.logger.info('Epoch={}, sum of loss={!s}, mrr={}'\n                            .format(self.cur_epoch, sum_loss/(batch_id+1e-8), mrr/(valid_size+1e-8)))\n        else:\n            self.logger.info('Epoch={}, sum of loss={!s}'\n                            .format(self.cur_epoch, sum_loss/batch_id))\n\n        self.cur_epoch += 1\n\n        # print(sum_loss/(batch_id+1e-8), mrr/(valid_size+1e-8))\n        return sum_loss/(batch_id+1e-8), mrr/(valid_size+1e-8)\n\n    def save_models(self, filename):\n        if os.path.exists(filename):\n            os.remove(filename)\n        for k,v in self.weights.items():\n            if self.type_model == 'lin':\n                if 'out' not in k:\n                    continue\n            write_in_file(filename, v.eval(self.sess), k)\n        for k,v in self.biases.items():\n            if self.type_model == 'lin':\n                if 'out' not in k:\n                    continue\n            write_in_file(filename, v.eval(self.sess), k)\n\nif __name__ == '__main__':\n    res_file = 'res_file'\n\n    # SAVING_STEP = 1\n    # MAF_EPOCHS = 21\n    # model = DCNH(learning_rate=0.1, batch_size=4, neg_ratio=3, n_input=4, n_out=2, n_hidden=3\n    #         ,files=['tmp_res.node_embeddings_src', 'tmp_res.node_embeddings_obj', 'data/test.align'])\n    SAVING_STEP = 10\n    MAF_EPOCHS = 20001\n    # gamma/eta, type_model and is_valid below are assumed example values;\n    # the constructor expects them, and files must be a dict keyed as in _read_train_dat\n    model = HALF_SP(learning_rate=0.01, batch_size=128, neg_ratio=5, gamma=1.0, eta=0.01\n            ,n_input=256, n_out=32, n_hidden=32, n_layer=2, type_model='mlp', is_valid=True\n            ,files={'feat-src': 'douban_all.txt', 'feat-end': 'weibo_all.txt', 'linkage': 'douban_weibo.identity.users.final.p0dot8'}\n            ,log_file='HALF_SP'\n            ,device=':/gpu:0')\n    for i in range(MAF_EPOCHS):\n        model.train_one_epoch()\n        if i>0 and i%SAVING_STEP==0:\n            model.save_models(res_file+'.epoch_'+str(i))","sub_path":"src/half/half_sp.py","file_name":"half_sp.py","file_ext":"py","file_size_in_byte":12465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
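build_valid_graph above ranks candidates with -sum(clip(sign(f*g), -1, 0)): wherever the two codes disagree in sign, sign(f*g) is -1 and contributes 1 to the distance. A NumPy check that, assuming no exact zero entries, this equals the Hamming distance between the binarised codes:

import numpy as np

f = np.array([ 0.9, -0.3,  0.7, -0.8])
g = np.array([ 0.4,  0.6,  0.2, -0.1])

# Distance as in the validation graph: count sign disagreements.
dist = -np.sum(np.clip(np.sign(f * g), -1.0, 0.0))

# Plain Hamming distance between the sign-binarised codes.
hamming = np.sum(np.sign(f) != np.sign(g))

assert dist == hamming == 1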
+{"seq_id":"264680362","text":"#\n# @lc app=leetcode.cn id=102 lang=python3\n#\n# [102] 二叉树的层次遍历\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n if not root:\n return []\n its = [root]\n res = []\n while len(its) > 0:\n tmp = []\n new_its = []\n # print(its)\n for i in its:\n tmp.append(i.val)\n if i.left:\n new_its.append(i.left)\n if i.right:\n new_its.append(i.right)\n its = new_its[:]\n res.append(tmp)\n return res\n\n\n# @lc code=end\n\n","sub_path":"102.二叉树的层次遍历.py","file_name":"102.二叉树的层次遍历.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"212995307","text":"import random\ng = int(input())\nH = []\nV = []\nscores = {}\n\n\nfor i in range(g):\n row = input().split()\n if row[0] == 'H':\n H.append(((i,),sorted(row[2:])))\n else:\n V.append((i,sorted(row[2:])))\n \n for ele in row[2:]:\n scores[ele] = scores.get(ele,0)+1\n\nrandom.shuffle(V)\nfor i in range(0,round(len(V)),2):\n H.append(((V[i][0],V[i+1][0]), list(set([*V[i][1],*V[i+1][1]]))))\n\nNH = []\nfor ele in H:\n score = 0\n t_scores = []\n for i in ele[1]:\n t_scores.append(scores[i])\n t_scores.sort(key=lambda k : -k)\n score = sorted(t_scores[:min(5,len(t_scores))], key=lambda k : -k)\n NH.append((ele[0],score))\n\nNH.sort(key=lambda k: k[1])\nprint(len(NH))\nfor i in NH:\n if len(i[0]) == 2:\n print(i[0][1],i[0][0])\n else:\n print(i[0][0])\n\n","sub_path":"hashcode/gg.py","file_name":"gg.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"417334162","text":"#!/usr/bin/env python3\n\n#This script is meant to be a companion script to pullpostgres.py & env_generator.py. After all the images\n#have been downloaded, ran, and config/env variable information has been pulled this script \n#is to run and populate the env_deltas.csv with postgres information. Furthermore, env_generator.py\n#will have generated an envlist which is used by this script to run the deltas.\n\n#This script was intended to run with the terminal command: \n\n# \"parallel --pipepart -a envlist --eta --block -1 -j 7 python3 env_delta.py\"\n\nimport fileinput\nimport sys\nimport os\nimport pandas as pd\nimport csv\n\n#Converts the Headers to CSV file \nread_file = pd.read_csv(r'./ENV_headers.txt')\nread_file.to_csv(r'./env_deltas.csv', index=None, sep='\\n')\n\nf = open('envlist', 'r')\n#Pulls in image names from envlist list\nimages = [word.strip() for line in f.readlines() for word in line.split(',') if word.strip()]\nf.close()\n\nfor line in fileinput.input():\n line = line.lstrip(\"./\")\n line = line.rstrip(\"/env.txt\\n\")\n f = open(line + \"/env.txt\" , 'r')\n lines = [word.strip() for line in f.readlines() for word in line.split('\\n') if word.strip()]\n for y in range(len(lines)):\n header = lines[y].split(\"=\",1)[0] #ENV variable name\n data = lines[y].partition(\"=\")[2] #ENV data\n\n #Finds line number of ENV variable on master list\n cmd = 'grep -xn ' + header + ' env_deltas.csv > ' + line + '.txt'\n os.system(cmd) \n f = open(line + \".txt\", 'r')\n line_no = f.readline().split(\":\",1)[0]\n cmd = 'rm ' + line + '.txt'\n os.system(cmd)\n \n #Prepares data for insertion into env_deltas\n if(data != \"\" and header != \"_\" and line_no != \" \"):\n data = data.replace(\" \", \"_\").replace(\"(\", \"\").replace(\")\",\"\") \n cmd = 'grep ' + data + ' env_deltas.csv > ' + line + \".txt\" \n sysReturn = os.system(cmd) \n f = open(line + \".txt\", \"r\")\n result = f.readline() \n if sysReturn != 0:\n cmd = 'rm ' + line + '.txt'\n os.system(cmd)\n if result != 0:\n data = data.replace('/', \"\\/\").replace('.', \"\\.\")\n #Awesome sed command to add postgres data to the csv file\n cmd = \"sed '/\\<\" + header + '\\>/s/$/ ,' + data + \"/' env_deltas.csv > \" + line + \".txt && mv \" + line + \".txt env_deltas.csv\" \n os.system(cmd)\n #Clean up\n cmd = \"rm \" + line + \".txt\"\n os.system(cmd)\n","sub_path":"env_delta.py","file_name":"env_delta.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"610783202","text":"from django.core.urlresolvers import resolve\nfrom django.http import HttpRequest\nfrom django.template.loader import render_to_string\nfrom django.test import TestCase\nfrom django.utils.html import escape\n\nfrom lists.forms import ListItemForm\nfrom lists.models import List, ListItem\nfrom lists.views import home_page\n\n\nclass HomePageTest(TestCase):\n\n def test_home_page_renders_home_template(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'lists/home.html')\n\n def test_home_page_uses_item_form(self):\n response = self.client.get('/')\n self.assertIsInstance(response.context['form'], ListItemForm)\n\n\nclass ListViewTest(TestCase):\n\n def test_displays_only_items_for_that_list(self):\n correct_list = List.objects.create()\n ListItem.objects.create(text='item 1', list=correct_list)\n ListItem.objects.create(text='item 2', list=correct_list)\n other_list = List.objects.create()\n ListItem.objects.create(text='other item 1', list=other_list)\n ListItem.objects.create(text='other item 2', list=other_list)\n\n response = self.client.get('/lists/%d/' % (correct_list.id,))\n\n self.assertContains(response, 'item 1')\n self.assertContains(response, 'item 2')\n self.assertNotContains(response, 'other item 1')\n self.assertNotContains(response, 'other item 2')\n\n def test_passes_correct_list_to_template(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n\n response = self.client.get('/lists/%d/' % (correct_list.id,))\n\n self.assertEqual(response.context['list'], correct_list)\n\n def test_uses_list_template(self):\n list_ = List.objects.create()\n response = self.client.get('/lists/%d/' % (list_.id,))\n self.assertTemplateUsed(response, 'lists/list.html')\n\n def test_can_save_POST_request_to_existing_list(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n\n self.client.post(\n '/lists/%d/' % (correct_list.id,),\n data={'text': 'A new item for an existing list'}\n )\n\n self.assertEqual(ListItem.objects.count(), 1)\n new_item = ListItem.objects.first()\n self.assertEqual(new_item.text, 'A new item for an existing list')\n self.assertEqual(new_item.list, correct_list)\n\n def test_POST_redirects_to_list_view(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n\n response = self.client.post(\n '/lists/%d/' % (correct_list.id,),\n data={'text': 'A new item for an existing list'}\n )\n\n self.assertRedirects(response, '/lists/%d/' % (correct_list.id,))\n\n def test_validation_errors_end_up_on_lists_page(self):\n list_ = List.objects.create()\n\n response = self.client.post(\n '/lists/%d/' % (list_.id,),\n data={'text': ''}\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'lists/list.html')\n expected_error = escape(\"You can't have an empty list item\")\n self.assertContains(response, expected_error)\n\n\nclass NewListTest(TestCase):\n\n def test_saving_a_POST_request(self):\n self.client.post('/lists/new', data={\n 'text': 'A new list item'\n })\n\n self.assertEqual(ListItem.objects.count(), 1)\n new_item = ListItem.objects.first()\n self.assertEqual(new_item.text, 'A new list item')\n\n def test_redirects_after_POST(self):\n response = self.client.post('/lists/new', data={\n 'text': 'A new list item'\n })\n\n new_list = List.objects.first()\n self.assertRedirects(response, '/lists/%d/' % (new_list.id,))\n\n def test_validation_errors_are_sent_back_to_home_page_template(self):\n response = 
self.client.post('/lists/new', data={'text': ''})\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'lists/home.html')\n expected_error = escape(\"You can't have an empty list item\")\n self.assertContains(response, expected_error)\n\n def test_invalid_list_items_are_not_saved(self):\n self.client.post('/lists/new', data={'text': ''})\n\n self.assertEqual(List.objects.count(), 0)\n self.assertEqual(ListItem.objects.count(), 0)\n","sub_path":"lists/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
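The home-page and validation tests assume a `ListItemForm` that rejects blank text with the exact message being escaped. A minimal sketch of such a form (the project's real `lists/forms.py` may differ):

```python
from django import forms
from lists.models import ListItem

class ListItemForm(forms.ModelForm):
    class Meta:
        model = ListItem
        fields = ('text',)
        error_messages = {
            'text': {'required': "You can't have an empty list item"}
        }
```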
+{"seq_id":"342347973","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport numpy as np\n\nmnist = input_data.read_data_sets(\"../MNIST_data/\")\n\nX_train = mnist.train.images\nX_test = mnist.test.images\ny_train = mnist.train.labels.astype(np.int32)\ny_test = mnist.test.labels\n\nconfig = tf.contrib.learn.RunConfig(tf_random_seed=42)\n\nfeature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)\ndnn_clf = tf.contrib.learn.DNNClassifier(hidden_units=[300, 100], n_classes=10, feature_columns=feature_cols, config=config)\ndnn_clf = tf.contrib.learn.SKCompat(dnn_clf)\ndnn_clf.fit(X_train, y_train, batch_size=50, steps=20000)","sub_path":"test06.py","file_name":"test06.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"38956026","text":"import pandas as pd\nimport numpy as np\nimport torch\nimport logging\nfrom torch_geometric.data import Data\n\n\ndef process_event(event, pt_min=0, phi_range=(-np.pi, np.pi),\n eta_range=(-5, 5), n_phi_sections=9, n_eta_sections=3,\n phi_slope_max=6e-4, z0_max=100):\n # parameters from heptrkx-gnn-tracking/configs/prep_big.yaml\n event_id, hits, cells, particles, truth = event\n # Barrel volume and layer ids\n vlids = [(8, 2), (8, 4), (8, 6), (8, 8),\n (13, 2), (13, 4), (13, 6), (13, 8),\n (17, 2), (17, 4)]\n n_det_layers = len(vlids)\n\n hits = select_hits(hits, truth, particles, vlids,\n pt_min).assign(event_id=event_id)\n\n # Divide detector into sections\n phi_edges = np.linspace(*phi_range, num=n_phi_sections)\n eta_edges = np.linspace(*eta_range, num=n_eta_sections)\n\n hits_sections = split_detector_sections(hits, phi_edges, eta_edges)\n\n # Graph features and scale\n feature_names = ['r', 'phi', 'z']\n feature_scale = np.array([1000., np.pi / n_phi_sections, 1000.])\n\n # Define adjacent layers\n layer_pairs = np.stack([np.arange(n_det_layers)[:-1],\n np.arange(n_det_layers)[1:]], axis=1)\n\n graphs = [construct_graph(section_hits, layer_pairs, phi_slope_max,\n z0_max, feature_names, feature_scale)\n for section_hits in hits_sections]\n return graphs\n\n\ndef select_hits(hits, truth, particles, vlids, pt_min=0):\n # Select barrel layers and assign convenient layer number [0-9]\n vlid_groups = hits.groupby(['volume_id', 'layer_id'])\n\n n_det_layers = len(vlids)\n\n ''' Losing information in hits '''\n hits = pd.concat([vlid_groups.get_group(vlids[i]).assign(layer=i)\n for i in range(n_det_layers)])\n\n # Calculate particle transverse momentum\n pt = np.sqrt(particles.px**2 + particles.py**2)\n # True particle selection.\n # Applies pt cut, removes all noise hits.\n particles = particles[pt > pt_min]\n truth = (truth[['hit_id', 'particle_id']]\n .merge(particles[['particle_id']], on='particle_id'))\n # Calculate derived hits variables\n ''' vector length r, angle phi in radians between x, y'''\n r = np.sqrt(hits.x**2 + hits.y**2)\n phi = np.arctan2(hits.y, hits.x)\n\n ''' r, phi instead of x, y and merging particle_id '''\n # Select the data columns we need\n hits = (hits[['hit_id', 'z', 'layer']]\n .assign(r=r, phi=phi)\n .merge(truth[['hit_id', 'particle_id']], on='hit_id'))\n # Remove duplicate hits\n hits = hits.loc[\n hits.groupby(['particle_id', 'layer'], as_index=False).r.idxmin()\n ]\n return hits\n\n\ndef calc_eta(r, z):\n ''' angle theta in radians between r, z '''\n theta = np.arctan2(r, z)\n # ??\n return -1. 
* np.log(np.tan(theta / 2.))\n\n\ndef calc_dphi(phi1, phi2):\n \"\"\"Computes phi2-phi1 given in range [-pi, pi]\"\"\"\n dphi = phi2 - phi1\n dphi[dphi > np.pi] -= 2 * np.pi\n dphi[dphi < -np.pi] += 2 * np.pi\n return dphi\n\n\ndef split_detector_sections(hits, phi_edges, eta_edges):\n \"\"\"Split hits according to provided phi and eta boundaries.\"\"\"\n hits_sections = []\n # Loop over sections\n for i in range(len(phi_edges) - 1):\n phi_min, phi_max = phi_edges[i], phi_edges[i + 1]\n # Select hits in this phi section\n ''' hits with similar angle between x,y '''\n phi_hits = hits[(hits.phi > phi_min) & (hits.phi < phi_max)]\n\n # Center these hits on phi=0\n centered_phi = phi_hits.phi - (phi_min + phi_max) / 2\n phi_hits = phi_hits.assign(phi=centered_phi, phi_section=i)\n for j in range(len(eta_edges) - 1):\n eta_min, eta_max = eta_edges[j], eta_edges[j + 1]\n # Select hits in this eta section\n eta = calc_eta(phi_hits.r, phi_hits.z)\n sec_hits = phi_hits[(eta > eta_min) & (eta < eta_max)]\n hits_sections.append(sec_hits.assign(eta_section=j))\n return hits_sections\n\n\ndef select_segments(hits1, hits2, phi_slope_max, z0_max):\n \"\"\"\n Construct a list of selected segments from the pairings\n between hits1 and hits2, filtered with the specified\n phi slope and z0 criteria.\n Returns: pd DataFrame of (index_1, index_2), corresponding to the\n DataFrame hit label-indices in hits1 and hits2, respectively.\n \"\"\"\n # Start with all possible pairs of hits\n keys = ['event_id', 'r', 'phi', 'z']\n hit_pairs = hits1[keys].reset_index().merge(\n hits2[keys].reset_index(), on='event_id', suffixes=('_1', '_2'))\n\n # Compute line through the points\n dphi = calc_dphi(hit_pairs.phi_1, hit_pairs.phi_2)\n dz = hit_pairs.z_2 - hit_pairs.z_1\n dr = hit_pairs.r_2 - hit_pairs.r_1\n phi_slope = dphi / dr\n\n z0 = hit_pairs.z_1 - hit_pairs.r_1 * dz / dr\n\n # Filter segments according to criteria\n good_seg_mask = (phi_slope.abs() < phi_slope_max) & (z0.abs() < z0_max)\n return hit_pairs[['index_1', 'index_2']][good_seg_mask]\n\n\ndef construct_graph(hits, layer_pairs, phi_slope_max, z0_max,\n feature_names, feature_scale):\n \"\"\"Construct one graph (e.g. 
from one event)\"\"\"\n\n # Loop over layer pairs and construct segments\n layer_groups = hits.groupby('layer')\n segments = []\n for (layer1, layer2) in layer_pairs:\n # Find and join all hit pairs\n try:\n hits1 = layer_groups.get_group(layer1)\n hits2 = layer_groups.get_group(layer2)\n # If an event has no hits on a layer, we get a KeyError.\n # In that case we just skip to the next layer pair\n except KeyError as e:\n logging.info('skipping empty layer: %s' % e)\n continue\n # Construct the segments\n segments.append(select_segments(hits1, hits2, phi_slope_max, z0_max))\n # Combine segments from all layer pairs\n segments = pd.concat(segments)\n\n # Prepare the graph matrices\n n_hits = len(hits)\n n_edges = len(segments)\n\n x = (hits[feature_names].values / feature_scale).astype(np.float32)\n ''' not necessary for edge_index\n Ri = np.zeros((n_hits, n_edges), dtype=np.uint8)\n Ro = np.zeros((n_hits, n_edges), dtype=np.uint8)\n '''\n y = np.zeros(n_edges, dtype=np.float32)\n\n # We have the segments' hits given by dataframe label,\n # so we need to translate into positional indices.\n # Use a series to map hit label-index onto positional-index.\n hit_idx = pd.Series(np.arange(n_hits), index=hits.index)\n\n ''' positional index for remaining hit pairs '''\n seg_start = hit_idx.loc[segments.index_1].values\n seg_end = hit_idx.loc[segments.index_2].values\n\n '''\n # Now we can fill the association matrices.\n # Note that Ri maps hits onto their incoming edges,\n # which are actually segment endings.\n Ri[seg_end, np.arange(n_edges)] = 1\n Ro[seg_start, np.arange(n_edges)] = 1\n '''\n\n # Fill the segment labels\n pid1 = hits.particle_id.loc[segments.index_1].values\n pid2 = hits.particle_id.loc[segments.index_2].values\n y[:] = (pid1 == pid2)\n # Return a tuple of the results\n\n edge_index = torch.from_numpy(np.array([seg_start, seg_end]))\n x = torch.from_numpy(x)\n y = torch.from_numpy(y)\n\n return Data(x=x, num_nodes=n_hits, edge_index=edge_index, y=y)\n","sub_path":"torch_geometric/utils/mltrack_utils.py","file_name":"mltrack_utils.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
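To make the cuts concrete: `select_segments` keeps a hit pair only when the straight line through the two hits has |phi slope| below `phi_slope_max` and crosses the beamline (r = 0) within `|z0| < z0_max`. A toy check with made-up hit coordinates:

```python
import numpy as np

r1, phi1, z1 = 32.0, 0.100, -4.0   # hypothetical hit on one layer
r2, phi2, z2 = 72.0, 0.112, 6.0    # hypothetical hit on the next layer
dphi = (phi2 - phi1 + np.pi) % (2 * np.pi) - np.pi  # same wrap as calc_dphi
phi_slope = dphi / (r2 - r1)                        # 3e-4
z0 = z1 - r1 * (z2 - z1) / (r2 - r1)                # -12.0
print(abs(phi_slope) < 6e-4, abs(z0) < 100)         # True True -> keep segment
```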
+{"seq_id":"453837814","text":"from __future__ import print_function\nimport os, sys, time, re, requests, json, unicodedata\nfrom six.moves.urllib_parse import urlencode\nfrom binascii import hexlify, unhexlify\nfrom spake2 import SPAKE2_Symmetric\nfrom nacl.secret import SecretBox\nfrom nacl.exceptions import CryptoError\nfrom nacl import utils\nfrom .eventsource import EventSourceFollower\nfrom .. import __version__\nfrom .. import codes\nfrom ..errors import ServerError, Timeout, WrongPasswordError, UsageError\nfrom ..util.hkdf import HKDF\nfrom ..channel_monitor import monitor\n\nSECOND = 1\nMINUTE = 60*SECOND\n\ndef to_bytes(u):\n return unicodedata.normalize(\"NFC\", u).encode(\"utf-8\")\n\n# relay URLs are as follows: (MESSAGES=[{phase:,body:}..])\n# GET /list?appid= -> {channelids: [INT..]}\n# POST /allocate {appid:,side:} -> {channelid: INT}\n# these return all messages (base64) for appid=/channelid= :\n# POST /add {appid:,channelid:,side:,phase:,body:} -> {messages: MESSAGES}\n# GET /get?appid=&channelid= (no-eventsource) -> {messages: MESSAGES}\n# GET /get?appid=&channelid= (eventsource) -> {phase:, body:}..\n# POST /deallocate {appid:,channelid:,side:} -> {status: waiting | deleted}\n# all JSON responses include a \"welcome:{..}\" key\n\nclass Channel:\n def __init__(self, relay_url, appid, channelid, side, handle_welcome):\n self._relay_url = relay_url\n self._appid = appid\n self._channelid = channelid\n self._side = side\n self._handle_welcome = handle_welcome\n self._messages = set() # (phase,body) , body is bytes\n self._sent_messages = set() # (phase,body)\n self._started = time.time()\n self._wait = 0.5*SECOND\n self._timeout = 3*MINUTE\n\n def _add_inbound_messages(self, messages):\n for msg in messages:\n phase = msg[\"phase\"]\n body = unhexlify(msg[\"body\"].encode(\"ascii\"))\n self._messages.add( (phase, body) )\n\n def _find_inbound_message(self, phase):\n for (their_phase,body) in self._messages - self._sent_messages:\n if their_phase == phase:\n return body\n return None\n\n def send(self, phase, msg):\n # TODO: retry on failure, with exponential backoff. We're guarding\n # against the rendezvous server being temporarily offline.\n if not isinstance(phase, type(u\"\")): raise TypeError(type(phase))\n if not isinstance(msg, type(b\"\")): raise TypeError(type(msg))\n self._sent_messages.add( (phase,msg) )\n payload = {\"appid\": self._appid,\n \"channelid\": self._channelid,\n \"side\": self._side,\n \"phase\": phase,\n \"body\": hexlify(msg).decode(\"ascii\")}\n data = json.dumps(payload).encode(\"utf-8\")\n r = requests.post(self._relay_url+\"add\", data=data)\n r.raise_for_status()\n resp = r.json()\n self._add_inbound_messages(resp[\"messages\"])\n\n def get(self, phase):\n if not isinstance(phase, type(u\"\")): raise TypeError(type(phase))\n # For now, server errors cause the client to fail. TODO: don't. This\n # will require changing the client to re-post messages when the\n # server comes back up.\n\n # fire with a bytestring of the first message for 'phase' that wasn't\n # one of ours. 
It will either come from previously-received messages,\n # or from an EventSource that we attach to the corresponding URL\n body = self._find_inbound_message(phase)\n while body is None:\n remaining = self._started + self._timeout - time.time()\n if remaining < 0:\n return Timeout\n queryargs = urlencode([(\"appid\", self._appid),\n (\"channelid\", self._channelid)])\n f = EventSourceFollower(self._relay_url+\"get?%s\" % queryargs,\n remaining)\n # we loop here until the connection is lost, or we see the\n # message we want\n for (eventtype, data) in f.iter_events():\n if eventtype == \"welcome\":\n self._handle_welcome(json.loads(data))\n if eventtype == \"message\":\n self._add_inbound_messages([json.loads(data)])\n body = self._find_inbound_message(phase)\n if body:\n f.close()\n break\n if not body:\n time.sleep(self._wait)\n return body\n\n def deallocate(self, mood=u\"unknown\"):\n # only try once, no retries\n data = json.dumps({\"appid\": self._appid,\n \"channelid\": self._channelid,\n \"side\": self._side,\n \"mood\": mood}).encode(\"utf-8\")\n requests.post(self._relay_url+\"deallocate\", data=data)\n # ignore POST failure, don't call r.raise_for_status()\n\nclass ChannelManager:\n def __init__(self, relay_url, appid, side, handle_welcome):\n self._relay_url = relay_url\n self._appid = appid\n self._side = side\n self._handle_welcome = handle_welcome\n\n def list_channels(self):\n queryargs = urlencode([(\"appid\", self._appid)])\n r = requests.get(self._relay_url+\"list?%s\" % queryargs)\n r.raise_for_status()\n channelids = r.json()[\"channelids\"]\n return channelids\n\n def allocate(self):\n data = json.dumps({\"appid\": self._appid,\n \"side\": self._side}).encode(\"utf-8\")\n r = requests.post(self._relay_url+\"allocate\", data=data)\n r.raise_for_status()\n data = r.json()\n if \"welcome\" in data:\n self._handle_welcome(data[\"welcome\"])\n channelid = data[\"channelid\"]\n return channelid\n\n def connect(self, channelid):\n return Channel(self._relay_url, self._appid, channelid, self._side,\n self._handle_welcome)\n\nclass Wormhole:\n motd_displayed = False\n version_warning_displayed = False\n\n def __init__(self, appid, relay_url):\n if not isinstance(appid, type(u\"\")): raise TypeError(type(appid))\n if not isinstance(relay_url, type(u\"\")):\n raise TypeError(type(relay_url))\n if not relay_url.endswith(u\"/\"): raise UsageError\n self._appid = appid\n self._relay_url = relay_url\n side = hexlify(os.urandom(5)).decode(\"ascii\")\n self._channel_manager = ChannelManager(relay_url, appid, side,\n self.handle_welcome)\n self.code = None\n self.key = None\n self.verifier = None\n self._sent_data = set() # phases\n self._got_data = set()\n\n def handle_welcome(self, welcome):\n if (\"motd\" in welcome and\n not self.motd_displayed):\n motd_lines = welcome[\"motd\"].splitlines()\n motd_formatted = \"\\n \".join(motd_lines)\n print(\"Server (at %s) says:\\n %s\" % (self._relay_url, motd_formatted),\n file=sys.stderr)\n self.motd_displayed = True\n\n # Only warn if we're running a release version (e.g. 0.0.6, not\n # 0.0.6-DISTANCE-gHASH). 
Only warn once.\n if (\"-\" not in __version__ and\n not self.version_warning_displayed and\n welcome[\"current_version\"] != __version__):\n print(\"Warning: errors may occur unless both sides are running the same version\", file=sys.stderr)\n print(\"Server claims %s is current, but ours is %s\"\n % (welcome[\"current_version\"], __version__), file=sys.stderr)\n self.version_warning_displayed = True\n\n if \"error\" in welcome:\n raise ServerError(welcome[\"error\"], self._relay_url)\n\n def get_code(self, code_length=2):\n if self.code is not None: raise UsageError\n channelid = self._channel_manager.allocate()\n code = codes.make_code(channelid, code_length)\n assert isinstance(code, type(u\"\")), type(code)\n self._set_code_and_channelid(code)\n self._start()\n return code\n\n def input_code(self, prompt=\"Enter wormhole code: \", code_length=2):\n lister = self._channel_manager.list_channels\n code = codes.input_code_with_completion(prompt, lister,\n code_length)\n return code\n\n def set_code(self, code): # used for human-made pre-generated codes\n if not isinstance(code, type(u\"\")): raise TypeError(type(code))\n if self.code is not None: raise UsageError\n self._set_code_and_channelid(code)\n self._start()\n\n def _set_code_and_channelid(self, code):\n if self.code is not None: raise UsageError\n mo = re.search(r'^(\\d+)-', code)\n if not mo:\n raise ValueError(\"code (%s) must start with NN-\" % code)\n self.code = code\n channelid = int(mo.group(1))\n self.channel = self._channel_manager.connect(channelid)\n monitor.add(self.channel)\n\n def _start(self):\n # allocate the rest now too, so it can be serialized\n self.sp = SPAKE2_Symmetric(to_bytes(self.code),\n idSymmetric=to_bytes(self._appid))\n self.msg1 = self.sp.start()\n\n def derive_key(self, purpose, length=SecretBox.KEY_SIZE):\n if not isinstance(purpose, type(u\"\")): raise TypeError(type(purpose))\n return HKDF(self.key, length, CTXinfo=to_bytes(purpose))\n\n def _encrypt_data(self, key, data):\n assert isinstance(key, type(b\"\")), type(key)\n assert isinstance(data, type(b\"\")), type(data)\n assert len(key) == SecretBox.KEY_SIZE, len(key)\n box = SecretBox(key)\n nonce = utils.random(SecretBox.NONCE_SIZE)\n return box.encrypt(data, nonce)\n\n def _decrypt_data(self, key, encrypted):\n assert isinstance(key, type(b\"\")), type(key)\n assert isinstance(encrypted, type(b\"\")), type(encrypted)\n assert len(key) == SecretBox.KEY_SIZE, len(key)\n box = SecretBox(key)\n data = box.decrypt(encrypted)\n return data\n\n\n def _get_key(self):\n if not self.key:\n self.channel.send(u\"pake\", self.msg1)\n pake_msg = self.channel.get(u\"pake\")\n self.key = self.sp.finish(pake_msg)\n self.verifier = self.derive_key(u\"wormhole:verifier\")\n\n def get_verifier(self):\n if self.code is None: raise UsageError\n if self.channel is None: raise UsageError\n self._get_key()\n return self.verifier\n\n def send_data(self, outbound_data, phase=u\"data\"):\n if not isinstance(outbound_data, type(b\"\")):\n raise TypeError(type(outbound_data))\n if not isinstance(phase, type(u\"\")): raise TypeError(type(phase))\n if phase in self._sent_data: raise UsageError # only call this once\n if self.code is None: raise UsageError\n if self.channel is None: raise UsageError\n # Without predefined roles, we can't derive predictably unique keys\n # for each side, so we use the same key for both. 
We use random\n # nonces to keep the messages distinct, and the Channel automatically\n # ignores reflections.\n self._sent_data.add(phase)\n self._get_key()\n data_key = self.derive_key(u\"wormhole:phase:%s\" % phase)\n outbound_encrypted = self._encrypt_data(data_key, outbound_data)\n self.channel.send(phase, outbound_encrypted)\n\n def get_data(self, phase=u\"data\"):\n if not isinstance(phase, type(u\"\")): raise TypeError(type(phase))\n if phase in self._got_data: raise UsageError # only call this once\n if self.code is None: raise UsageError\n if self.channel is None: raise UsageError\n self._got_data.add(phase)\n self._get_key()\n data_key = self.derive_key(u\"wormhole:phase:%s\" % phase)\n inbound_encrypted = self.channel.get(phase)\n try:\n inbound_data = self._decrypt_data(data_key, inbound_encrypted)\n return inbound_data\n except CryptoError:\n raise WrongPasswordError\n\n def close(self):\n monitor.close(self.channel)\n self.channel.deallocate()\n","sub_path":"src/wormhole/blocking/transcribe.py","file_name":"transcribe.py","file_ext":"py","file_size_in_byte":12230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
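`_encrypt_data`/`_decrypt_data` above are thin wrappers over PyNaCl's `SecretBox`; a self-contained round trip using the same calls:

```python
from nacl.secret import SecretBox
from nacl import utils

key = utils.random(SecretBox.KEY_SIZE)        # stands in for a derived key
nonce = utils.random(SecretBox.NONCE_SIZE)
ct = SecretBox(key).encrypt(b"hello", nonce)  # nonce is prepended to the box
assert SecretBox(key).decrypt(ct) == b"hello" # nonce recovered from ct itself
```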
+{"seq_id":"556547754","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\n\n#from compas_fab.robots import JointTrajectoryPoint\n\nfrom compas.datastructures import Mesh\nfrom compas.datastructures import mesh_transform\n\nfrom compas.geometry import Frame\nfrom compas.geometry import Box\nfrom compas.geometry import centroid_points\nfrom compas.geometry import cross_vectors\nfrom compas.geometry import normalize_vector\nfrom compas.geometry import centroid_polyhedron\nfrom compas.geometry import volume_polyhedron\n\nfrom .utilities import _deserialize_from_data\nfrom .utilities import _serialize_to_data\n\n\n__all__ = ['Element']\n\n\nclass Element(object):\n \"\"\"Data structure representing a discrete elements of an assembly.\n\n Attributes\n ----------\n _frame : :class:`compas.geometry.Frame`\n The frame of the element.\n\n _tool_frame : :class:`compas.geometry.Frame`\n The frame of the element where the robot's tool should attach to.\n\n _source : :class:`compas.geometry.Shape`\n The source geometry of the element, e.g., `compas.geometry.Box`.\n\n _mesh : :class:`compas.geometry.Mesh`\n The mesh geometry of the element.\n\n trajectory : :class:`compas_fab.robots.JointTrajectory`\n The robot trajectory in joint space.\n\n path : :list: :class:`compas.geometry.Frame`\n The robot tool path in cartesian space.\n\n Examples\n --------\n >>> from compas.datastructures import Mesh\n >>> from compas.geometry import Box\n >>> element = Element.from_box(Box(Frame.worldXY(), ))\n\n \"\"\"\n\n def __init__(self, frame):\n super(Element, self).__init__()\n\n self.frame = frame\n self._tool_frame = None\n\n self._source = None\n self._mesh = None\n\n self.trajectory = None\n self.path = []\n\n\n\n @classmethod\n def from_mesh(cls, mesh, frame):\n \"\"\"Construct an element from a mesh.\n\n Parameters\n ----------\n mesh : :class:`Mesh`\n Mesh datastructure.\n frame : :class:`Frame`\n Origin frame of the element.\n\n Returns\n -------\n :class:`Element`\n New instance of element.\n \"\"\"\n element = cls(frame)\n element._source = element._mesh = mesh\n return element\n\n @classmethod\n def from_shape(cls, shape, frame):\n \"\"\"Construct an element from a shape primitive.\n\n Parameters\n ----------\n shape : :class:`compas.geometry.Shape`\n Shape primitive describing the element.\n frame : :class:`Frame`\n Origin frame of the element.\n\n Returns\n -------\n :class:`Element`\n New instance of element.\n \"\"\"\n element = cls(frame)\n element._source = shape\n element._mesh = Mesh.from_shape(element._source)\n return element\n\n @classmethod\n def from_box(cls, box):\n \"\"\"Construct an element from a box primitive.\n\n Parameters\n ----------\n box : :class:`compas.geometry.Box`\n Box primitive describing the element.\n\n Returns\n -------\n :class:`Element`\n New instance of element.\n \"\"\"\n return cls.from_shape(box, box.frame)\n\n @classmethod\n def from_dimensions(cls, length, width, height):\n \"\"\"Construct an element with a box primitive with the given dimensions.\n\n Parameters\n ----------\n length : float\n length of the box.\n width : float\n width of the box.\n height : float\n height of the box.\n Returns\n -------\n :class:`Element`\n New instance of element.\n \"\"\"\n\n frame = Frame([0., 0., height/2], [1, 0, 0], [0, 1, 0]) #center of the box frame\n box = Box(frame, length, width, height)\n return cls.from_shape(box, frame)\n\n @classmethod\n def from_polysurface(cls, guid, frame):\n 
\"\"\"Class method for constructing a block from a Rhino poly-surface.\n\n Parameters\n ----------\n guid : str\n The GUID of the poly-surface.\n frame : :class:`Frame`\n Origin frame of the element.\n Notes\n -----\n In Rhino, poly-surfaces are organised such that the cycle directions of\n the individual sub-surfaces produce normal vectors that point out of the\n enclosed volume. The normal vectors of the faces of the mesh, therefore\n also point \"out\" of the enclosed volume.\n \"\"\"\n from compas_rhino.geometry import RhinoSurface\n element = cls(frame)\n element._source = RhinoSurface.from_guid(guid)\n element._mesh = element._source.brep_to_compas()\n return element\n\n @classmethod\n def from_rhinomesh(cls, guid, frame):\n \"\"\"Class method for constructing a block from a Rhino mesh.\n\n Parameters\n ----------\n guid : str\n The GUID of the mesh.\n frame : :class:`Frame`\n Origin frame of the element.\n \"\"\"\n from compas_rhino.geometry import RhinoMesh\n element = cls(frame)\n element._source = RhinoMesh.from_guid(guid)\n element._mesh = element._source.mesh.to_compas()\n return element\n\n @property\n def mesh(self):\n \"\"\"Mesh of the element.\"\"\"\n if not self._source:\n return None\n\n if self._mesh:\n return self._mesh\n\n if isinstance(self._source, Mesh):\n self._mesh = self._source\n else:\n self._mesh = Mesh.from_shape(self._source)\n\n return self._mesh\n\n @mesh.setter\n def mesh(self, mesh):\n self._source = self._mesh = mesh\n\n @property\n def frame(self):\n \"\"\"Frame of the element.\"\"\"\n return self._frame\n\n @frame.setter\n def frame(self, frame):\n self._frame = frame.copy()\n\n @property\n def tool_frame(self):\n \"\"\"tool frame of the element\"\"\"\n if not self._tool_frame:\n self._tool_frame = self.frame.copy()\n\n return self._tool_frame\n\n @tool_frame.setter\n def tool_frame(self, frame):\n self._tool_frame = frame.copy()\n\n @property\n def pose_quaternion(self):\n \"\"\" formats the element's tool frame to a pose quaternion and returns the pose\"\"\"\n return list(self._tool_frame.point) + list(self._tool_frame.quaternion)\n\n @property\n def centroid(self):\n return self._mesh.centroid()\n\n @property\n def face_frames(self):\n \"\"\"Compute the local frame of each face of the element's mesh.\n\n Returns\n -------\n dict\n A dictionary mapping face identifiers to face frames.\n \"\"\"\n return {fkey: self.face_frame(fkey) for fkey in self._mesh.faces()}\n\n def face_frame(self, fkey):\n \"\"\"Compute the frame of a specific face.\n\n Parameters\n ----------\n fkey : hashable\n The identifier of the frame.\n\n Returns\n -------\n frame\n The frame of the specified face.\n \"\"\"\n xyz = self._mesh.face_coordinates(fkey)\n o = self._mesh.face_center(fkey)\n w = self._mesh.face_normal(fkey)\n u = [xyz[1][i] - xyz[0][i] for i in range(3)] # align with longest edge instead?\n v = cross_vectors(w, u)\n uvw = normalize_vector(u), normalize_vector(v), normalize_vector(w)\n return o, uvw\n\n @property\n def top(self):\n \"\"\"Identify the *top* face of the element's mesh.\n\n Returns\n -------\n int\n The identifier of the face.\n\n Notes\n -----\n The face with the highest centroid is considered the *top* face.\n \"\"\"\n fkey_centroid = {fkey: self._mesh.face_center(fkey) for fkey in self._mesh.faces()}\n fkey, _ = sorted(fkey_centroid.items(), key=lambda x: x[1][2])[-1]\n return fkey\n\n @property\n def center(self):\n \"\"\"Compute the center of mass of the element.\n\n Returns\n -------\n point\n The center of mass of the element.\n \"\"\"\n 
vertices = [self._mesh.vertex_coordinates(key) for key in self._mesh.vertices()]\n faces = [self._mesh.face_vertices(fkey) for fkey in self._mesh.faces()]\n return centroid_polyhedron((vertices, faces))\n\n @property\n def volume(self):\n \"\"\"Compute the volume of the element.\n\n Returns\n -------\n float\n The volume of the element.\n \"\"\"\n vertices = [self._mesh.vertex_coordinates(key) for key in self._mesh.vertices()]\n faces = [self._mesh.face_vertices(fkey) for fkey in self._mesh.faces()]\n v = volume_polyhedron((vertices, faces))\n return v\n\n @classmethod\n def from_data(cls, data):\n \"\"\"Construct an element from its data representation.\n\n Parameters\n ----------\n data : :obj:`dict`\n The data dictionary.\n\n Returns\n -------\n Element\n The constructed element.\n \"\"\"\n element = cls(Frame.worldXY())\n element.data = data\n return element\n\n @property\n def data(self):\n \"\"\"Returns the data dictionary that represents the element.\n\n Returns\n -------\n dict\n The element data.\n\n Examples\n --------\n >>> element = Element(Frame.worldXY())\n >>> print(element.data)\n \"\"\"\n d = dict(frame=self.frame.to_data())\n\n # Only include gripping plane if attribute is really set\n # (unlike the property getter that defaults to `self.frame`)\n if self._tool_frame:\n d['_tool_frame'] = self._tool_frame.to_data()\n\n if self._source:\n d['_source'] = _serialize_to_data(self._source)\n\n if self._mesh:\n #d['_mesh'] = _serialize_to_data(self._mesh)\n d['_mesh'] = self._mesh.to_data()\n\n if self.trajectory:\n d['trajectory'] = [f.to_data() for f in self.trajectory]\n\n if self.path:\n d['path'] = [f.to_data() for f in self.path]\n\n return d\n\n @data.setter\n def data(self, data):\n self.frame = Frame.from_data(data['frame'])\n if '_tool_frame' in data:\n self.tool_frame = Frame.from_data(data['_tool_frame'])\n if '_source' in data:\n self._source = _deserialize_from_data(data['_source'])\n if '_mesh' in data:\n #self._mesh = _deserialize_from_data(data['_mesh'])\n self._mesh = Mesh.from_data(data['_mesh'])\n if 'trajectory' in data:\n #from compas_fab.robots import JointTrajectory\n #self.trajectory = JointTrajectory.from_data(data['trajectory'])\n self.trajectory = _deserialize_from_data(data['trajectory'])\n if 'path' in data:\n self.path = [Frame.from_data(d) for d in data['path']]\n\n def to_data(self):\n \"\"\"Returns the data dictionary that represents the element.\n\n Returns\n -------\n dict\n The element data.\n\n Examples\n --------\n >>> from compas.geometry import Frame\n >>> e1 = Element(Frame.worldXY())\n >>> e2 = Element.from_data(element.to_data())\n >>> e2.frame == Frame.worldXY()\n True\n \"\"\"\n return self.data\n\n def transform(self, transformation):\n \"\"\"Transforms the element.\n\n Parameters\n ----------\n transformation : :class:`Transformation`\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> from compas.geometry import Box\n >>> from compas.geometry import Translation\n >>> element = Element.from_box(Box(Frame.worldXY(), 1, 1, 1))\n >>> element.transform(Translation([1, 0, 0]))\n \"\"\"\n self.frame.transform(transformation)\n if self._tool_frame:\n self.tool_frame.transform(transformation)\n if self._source:\n if type(self._source) == Mesh:\n mesh_transform(self._source, transformation) # it would be really good to have Mesh.transform()\n else:\n self._source.transform(transformation)\n if self._mesh:\n mesh_transform(self._mesh, transformation) # it would be really good to have Mesh.transform()\n if self.path:\n 
[f.transform(transformation) for f in self.path]\n\n def transformed(self, transformation):\n \"\"\"Returns a transformed copy of this element.\n\n Parameters\n ----------\n transformation : :class:`Transformation`\n\n Returns\n -------\n Element\n\n Examples\n --------\n >>> from compas.geometry import Box\n >>> from compas.geometry import Translation\n >>> element = Element.from_box(Box(Frame.worldXY(), 1, 1, 1))\n >>> element2 = element.transformed(Translation([1, 0, 0]))\n \"\"\"\n elem = self.copy()\n elem.transform(transformation)\n return elem\n\n def copy(self):\n \"\"\"Returns a copy of this element.\n\n Returns\n -------\n Element\n \"\"\"\n elem = Element(self.frame.copy())\n if self._tool_frame:\n elem.tool_frame = self.tool_frame.copy()\n if self._source:\n elem._source = self._source.copy()\n if self._mesh:\n elem._mesh = self._mesh.copy()\n if self.path:\n elem.path = [f.copy() for f in self.path]\n\n return elem\n","sub_path":"src/assembly_information_model/assembly/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":13500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
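A short usage sketch of the class, following its own doctests (assumes a compas version whose `Translation` accepts a vector list, as the docstrings above do):

```python
from compas.geometry import Translation

element = Element.from_dimensions(2.0, 1.0, 0.5)   # box, frame at z = height/2
moved = element.transformed(Translation([1.0, 0.0, 0.0]))
print(element.frame.point, moved.frame.point)      # copy shifted in x only
```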
+{"seq_id":"120855478","text":"import sys\nfrom collections import deque\nr=sys.stdin.readline\nN,M=map(int,r().split())\nboard=[]\nD=[(1,0),(-1,0),(0,1),(0,-1)]\nfor _ in range(N):\n board.append(list(r().strip()))\nfor i in range(N):\n for j in range(M):\n if board[i][j]==\"R\":\n R=[i,j]\n board[i][j]=\".\"\n elif board[i][j]==\"B\":\n B=[i,j]\n board[i][j]=\".\"\n\ndef move(x,y,d):\n dist=0\n while True:\n nextPos=board[x+d[0]][y+d[1]]\n if nextPos=='.':\n x,y=x+d[0],y+d[1]\n elif nextPos=='O':\n return True,0,[-1,-1]\n elif nextPos=='#':\n return False,dist,[x,y]\n dist+=1\n\ndef bfs():\n q=deque()\n q.append([R,B,0])\n visit=set()\n visit.add((tuple(R),tuple(B)))\n while q:\n red,blue,cnt=q.popleft()\n tmpRed,tmpBlue=red,blue\n #if cnt==10: return -1\n for i in range(4): #4방향\n flgR,distR,red=move(tmpRed[0],tmpRed[1],D[i])#일단 움직이고보자\n flgB,distB,blue=move(tmpBlue[0],tmpBlue[1],D[i])\n if flgR and not flgB: \n return cnt+1#빨간색은 들어가고 파란색은 아니면 성공\n elif flgB: continue #파란색이 들어가면 실패\n elif not flgR and not flgB: #일단 둘다 구멍에 안들어가고\n if red==blue: #겹치는 경우\n if distR>distB:\n red=red[0]-D[i][0],red[1]-D[i][1]\n else:\n blue=blue[0]-D[i][0],blue[1]-D[i][1]\n if (tuple(red),tuple(blue)) not in visit:\n q.append([red,blue,cnt+1]) #다시 큐로\n visit.add((tuple(red),tuple(blue)))\n return -1\nprint(bfs())\n","sub_path":"Python/BOJ/13460.py","file_name":"13460.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"291101132","text":"import json\n\nfrom datetime import date\nfrom django.utils.html import urlencode\n\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom cleanups.factories import *\n\n\nclass CleanupsAPIViewsTestCase(TestCase):\n def setUp(self):\n self.url = reverse('api:cleanups')\n self.user = UserFactory()\n self.location = LocationFactory()\n CleanupFactory()\n CleanupFactory()\n CleanupFactory()\n\n def test_api_cleanup_list_view(self):\n response = self.client.get(self.url)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 3)\n\n def test_api_cleanup_create_view(self):\n cleanup = {\n 'title': 'Test Cleanup Create',\n 'description': 'A test event.',\n 'start_time': '09:30 AM',\n 'end_time': '11:30 AM',\n 'date': str(date.today()),\n 'host': self.user.id,\n 'location': {'number': '333',\n 'street': 'Beach Ave'}\n }\n self.client.force_login(self.user)\n response = self.client.post(self.url, data=json.dumps(cleanup),\n content_type='application/json')\n self.assertEqual(response.status_code, 201)\n\n def test_api_cleanup_update_success(self):\n cleanup = CleanupFactory(title='Oakland Test Cleanup', host=self.user)\n url = reverse('api:cleanup-detail', args=[cleanup.id])\n new_name = 'Oakland Cleanup'\n data = {'title': new_name,\n 'description': cleanup.description,\n 'host': self.user.id,\n 'date': str(date.today()),\n 'start_time': cleanup.start_time,\n 'end_time': cleanup.end_time,\n 'location': {'id': cleanup.location.id,\n 'number': '122',\n 'street': 'Sandy Lane'}\n }\n self.client.force_login(self.user)\n response = self.client.put(url, data=json.dumps(data),\n content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data.get('title'), new_name)\n self.assertEqual(response.data.get('location')['number'], '122')\n\n def test_api_cleanup_patch_success(self):\n cleanup = CleanupFactory(description=\"An old description.\", host=self.user)\n url = reverse('api:cleanup-detail', args=[cleanup.id])\n data = {'title': cleanup.title,\n 'description': 'A new description.',\n 'host': self.user.id}\n self.client.force_login(self.user)\n response = self.client.patch(url, data=json.dumps(data),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'A new description.')\n\n def test_api_cleanup_change_participant_status(self):\n cleanup = CleanupFactory(title='Oakland Test Cleanup')\n url = reverse('api:cleanup-detail', kwargs={'pk': cleanup.id})\n data = {'participants': [self.user.id]}\n self.client.force_login(self.user)\n\n # Test participant was added\n response = self.client.patch(url, data=json.dumps(data), follow=True,\n content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn(self.user.id, response.data.get('participants'))\n\n # Test participant was removed\n response = self.client.patch(url, data=json.dumps(data), follow=True,\n content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertNotIn(self.user.id, response.data.get('participants'))\n\n def test_api_cleanup_delete_success(self):\n cleanup = CleanupFactory(title='Oakland Test Cleanup', host=self.user)\n url = reverse('api:cleanup-detail', args=[cleanup.id])\n data = {'id': cleanup.id}\n self.client.force_login(self.user)\n response = self.client.delete(url, data=json.dumps(data),\n content_type='application/json')\n self.assertEqual(response.status_code, 204)\n\n\nclass 
CleanupTemplateViewsTestCase(TestCase):\n def setUp(self):\n self.user = UserFactory()\n self.cleanup = CleanupFactory(host=self.user)\n CleanupFactory()\n CleanupFactory()\n\n def test_cleanup_list_template(self):\n url = reverse('cleanups-list')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n\n def test_cleanup_new_template(self):\n url = reverse('cleanup-new')\n self.client.force_login(self.user)\n response = self.client.get(url, follow=True)\n\n self.assertEqual(response.status_code, 200)\n\n def test_cleanup_create_success(self):\n url = reverse('cleanup-create')\n self.client.force_login(self.user)\n data = urlencode({'title': \"New Cleanup Created\",\n 'description': 'A totally new cleanup event!',\n 'number': '333',\n 'street': 'Newbie Ave',\n 'image': 'default.jpg',\n 'host': self.user,\n 'date': str(date.today()),\n 'start_time': '3:30 PM',\n 'end_time': '7:30 PM'})\n response = self.client.post(url, data=data,\n content_type='application/x-www-form-urlencoded')\n\n self.assertEqual(response.status_code, 200)\n\n def test_cleanup_edit_template(self):\n url = reverse('cleanup-edit', args=[self.cleanup.id])\n self.client.force_login(self.user)\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n\n def test_cleanup_edit_success(self):\n url = reverse('cleanup-edit', args=[self.cleanup.id])\n self.client.force_login(self.user)\n data = {'title': \"New Name Edited\",\n 'description': self.cleanup.description,\n 'number': '122',\n 'street': 'Sandy Lane',\n 'host': self.user.id,\n 'date': str(date.today()),\n 'start_time': self.cleanup.start_time,\n 'end_time': self.cleanup.end_time}\n response = self.client.put(url, data=data,\n content_type='application/x-www-form-urlencoded')\n\n self.assertEqual(response.status_code, 200)\n\n def test_cleanup_join_success(self):\n user = UserFactory()\n url = reverse('join-cleanup', args=[self.cleanup.id])\n data = urlencode({'participants': user.username})\n\n self.client.force_login(user)\n response = self.client.post(url, data=data,\n content_type='application/x-www-form-urlencoded')\n self.assertEqual(response.status_code, 200)\n\n def test_cleanup_delete_success(self):\n cleanup = CleanupFactory(host=self.user)\n url = reverse('cleanup-delete', args=[cleanup.id])\n self.client.force_login(self.user)\n\n response = self.client.delete(url, follow=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertRedirects(response, reverse('dashboard'))\n","sub_path":"trashtalk/apps/cleanups/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":7393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
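`UserFactory` and `CleanupFactory` come from the unshown `cleanups.factories` module; a plausible factory_boy sketch of the user factory (an assumption, not the project's actual code):

```python
import factory
from django.contrib.auth import get_user_model

class UserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = get_user_model()

    username = factory.Sequence(lambda n: 'user%d' % n)
    email = factory.LazyAttribute(lambda o: '%s@example.com' % o.username)
```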
+{"seq_id":"646829340","text":"from celery import task\r\n\r\nfrom django.core.mail import send_mail\r\n\r\nfrom .models import Order\r\n\r\n\r\n@task\r\ndef order_created(order_id):\r\n '''\r\n Task to send an email notification when an order\r\n is successfully created.\r\n '''\r\n order = Order.objects.get(id=order_id)\r\n subject = f'Order nr. {order.id}'\r\n message = f'Dear {order.buyer.username}, \\n\\n You have successfully placed an order. Your order ID is {order.id}.'\r\n mail_sent = send_mail(\r\n subject, message, 'admin@example.com', [order.buyer.email])\r\n\r\n return mail_sent\r\n","sub_path":"orders/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"21054826","text":"import re\n\ndef get_line_bounds(cursor):\n end = cursor.copy()\n start = end.copy()\n start.set_line(end.get_line())\n\n very_end = end.copy()\n very_end.forward_to_end()\n\n if end.get_line() == very_end.get_line():\n start.backward_char()\n return start, very_end\n\n if not end.ends_line():\n end.forward_to_line_end()\n \n end.forward_char()\n \n return start, end \n \ndef cursor_on_start_or_end_whitespace(cursor):\n if cursor.starts_line() or cursor.ends_line():\n return True\n \n start, end = get_line_bounds(cursor)\n starttext = start.get_text(cursor)\n endtext = cursor.get_text(end)\n\n if starttext.strip() == u'' or endtext.strip() == u'':\n return True\n \n return False\n\nmatch_ws = re.compile(u'(?u)^[ \\t]*')\ndef get_whitespace(start):\n if start.is_end():\n return u''\n \n match = match_ws.search(line_text(start))\n if match:\n return match.group(0)\n else:\n return u''\n\ndef line_text(iter):\n if not iter.starts_line():\n iter = iter.copy()\n iter.set_line(iter.get_line())\n\n end = iter.copy()\n if not end.ends_line():\n end.forward_to_line_end()\n \n return iter.get_text(end)\n \ndef line_is_empty(iter):\n return iter.is_end() or line_text(iter).strip() == u''\n\ndef iter_lines(from_iter, delta):\n line_count = from_iter.get_buffer().get_line_count()\n iter = from_iter.copy()\n while True:\n newline = iter.get_line() + delta\n if newline < 0 or newline > line_count - 1:\n return\n \n olditer = iter.copy()\n iter.set_line(iter.get_line() + delta)\n \n yield olditer, iter\n\ndef get_next_not_empty_line(from_iter, delta):\n for p, n in iter_lines(from_iter, delta):\n if not line_is_empty(n):\n return n\n \n return None\n\ndef next_line(iter):\n result = iter.copy()\n result.set_line(result.get_line() + 1)\n return result\n\ndef prev_line(iter):\n result = iter.copy()\n result.set_line(result.get_line() - 1)\n return result","sub_path":"snaked/plugins/edit_and_select/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"69224848","text":"'''\n\nIt is possible to write five as a sum in exactly six different ways:\n\n4 + 1\n3 + 2\n3 + 1 + 1\n2 + 2 + 1\n2 + 1 + 1 + 1\n1 + 1 + 1 + 1 + 1\n\nHow many different ways can one hundred be written as a sum of at least two positive integers?\n\n'''\n\n\ndef n_ways(n,term_max,sums):\n\tif n==0:\n\t\treturn 1\n\tif n==1:\n\t\treturn 1\n\tif term_max==1:\n\t\treturn term_max\n\tif n in sums:\n\t\tif term_max > n and n in sums[n]:\n\t\t\treturn sums[n][n]\n\t\telif term_max in sums[n]:\n\t\t\treturn sums[n][term_max]\n\n\ta = 0\n\tif term_max>n:\n\t\tterm_max = n\n\ttemp = term_max\n\twhile temp>=1:\n\t\tb = n_ways(n-temp,temp,sums)\n\t\ttemp-=1\n\t\ta+=b\n\tif n in sums:\n\t\tsums[n][term_max] = a\n\telse:\n\t\tsums[n] = {}\n\t\tsums[n][term_max] = a\n\treturn a\n\n\n\ndef main():\n\tsums={}\n\tprint(n_ways(100,99,sums))\n\tprint(sums)\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n","sub_path":"Problem76.py","file_name":"Problem76.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"236756805","text":"#!/usr/bin/env python\n#-*- coding: UTF-8 -*-\n\n__author__ = 'helljump'\n\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtCore\nfrom PyQt4 import Qsci\nimport logging\nimport startup\nfrom datetime import datetime\nimport time\nimport os\nimport re\nimport transaction\nimport random\nfrom utils import MyProgressDialog\nfrom persistent.dict import PersistentDict\nimport codecs\nimport unicodecsv as csv\nfrom dbobj import Document\nfrom ui.autodate_ui import Ui_Dialog\nfrom dbobj import Template\nimport sys\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Dialog(QtGui.QDialog, Ui_Dialog):\n def __init__(self, project, parent=None):\n super(Dialog, self).__init__(parent)\n self.setupUi(self)\n self.project = project\n root = project.root()\n prefs = root.get('prefs', {}).get('autodate', {})\n self.from_dt.setDateTime(prefs.get('from', datetime(2000, 1, 1)))\n self.to_dt.setDateTime(prefs.get('to', datetime.now()))\n\n @QtCore.pyqtSlot()\n def on_from_dt_clicked(self):\n log.debug('from')\n a = time.mktime(datetime(2000, 1, 1).timetuple())\n b = time.mktime(datetime.now().timetuple())\n c = datetime.fromtimestamp(random.randint(a, b))\n self.from_dt.setDateTime(c)\n\n @QtCore.pyqtSlot()\n def on_to_dt_clicked(self):\n log.debug('to')\n a = time.mktime(self.from_dt.dateTime().toPyDateTime().timetuple())\n b = time.mktime(datetime.now().timetuple())\n c = datetime.fromtimestamp(random.randint(a, b))\n self.to_dt.setDateTime(c)\n\n def accept(self):\n root = self.project.root()\n root['prefs']['autodate'] = {\n 'from': self.from_dt.dateTime().toPyDateTime(),\n 'to': self.to_dt.dateTime().toPyDateTime()\n }\n transaction.commit()\n self.setResult(1)\n self.hide()\n\n\ndef process(prj, parent=None):\n dlg = Dialog(prj, parent)\n rc = dlg.exec_()\n if not rc:\n return\n a = time.mktime(dlg.from_dt.dateTime().toPyDateTime().timetuple())\n b = time.mktime(dlg.to_dt.dateTime().toPyDateTime().timetuple())\n if a > b:\n a, b = b, a\n docs = prj.root()['documents']\n pdlg = MyProgressDialog(u'Обработка', u'Установка случайной даты', u'Отмена', 0, len(docs), parent=parent)\n pdlg.show()\n try:\n for i, doc in enumerate(docs):\n doc.date = datetime.fromtimestamp(random.randint(a, b))\n pdlg.set_value(i)\n QtGui.qApp.processEvents()\n if pdlg.wasCanceled():\n transaction.abort()\n break\n else:\n docs.sort()\n transaction.commit()\n if parent is not None:\n parent.model.layoutChanged.emit()\n except:\n transaction.abort()\n raise\n finally:\n pdlg.close()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n root = startup.CONN.root()\n prj = root[\"projects\"][0]\n prj.open()\n process(prj)\n prj.close()\n","sub_path":"autodata_u.py","file_name":"autodata_u.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"325506938","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Move commons category sitelinks to category items where needed\n# Mike Peel 10-Jun-2018 v1\n\nfrom __future__ import unicode_literals\n\nimport pywikibot\nfrom pywikibot.data import api\nimport numpy as np\nimport time\nimport string\nfrom pywikibot import pagegenerators\nimport urllib\nimport pprint\nimport csv\nimport random\nfrom pibot_functions import *\n\ndatabase = False\nmanual = True\nmaxnum = 1000000\nusetemplate = 0\nusecategory = 0\nwikidata_site = pywikibot.Site(\"wikidata\", \"wikidata\")\nrepo = wikidata_site.data_repository() # this is a DataSite object\ncommons = pywikibot.Site('commons', 'commons')\n\n# From https://gist.github.com/ettorerizza/7eaebbd731781b6007d9bdd9ddd22713\ndef search_entities(site, itemtitle):\n params = { 'action' :'wbsearchentities', \n 'format' : 'json',\n 'language' : 'en',\n 'type' : 'item',\n 'search': itemtitle}\n request = api.Request(site=site, parameters=params)\n return request.submit()\n\ndef search_entities_es(site, itemtitle):\n params = { 'action' :'wbsearchentities', \n 'format' : 'json',\n 'language' : 'es',\n 'type' : 'item',\n 'search': itemtitle}\n request = api.Request(site=site, parameters=params)\n return request.submit()\n\ndef get_entities(site, wdItem):\n request = api.Request(site=site,\n action='wbgetentities',\n format='json',\n ids=wdItem,\n languages='en|fr',\n props='sitelinks/urls|descriptions|aliases|labels',\n sitefilter='enwiki|frwiki') \n return request.submit()\n\ndef prettyPrint(variable):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(variable)\n\ndef runimport(targetcat):\n print(targetcat.title())\n try:\n wd_item = pywikibot.ItemPage.fromPage(targetcat)\n item_dict = wd_item.get()\n print('http://www.wikidata.org/wiki/'+wd_item.title())\n # targetcat.touch()\n return 0\n except:\n print('No existing link')\n\n # searchkeys = ['map', 'Map', 'interior', 'Interior', 'inside', 'Inside']#, 'taken', 'Taken']\n # for searchkey in searchkeys:\n # if searchkey in targetcat.title():\n # print(targetcat.title())\n # input('continue?')\n # break\n\n searchname = targetcat.title().replace('Category:','')\n searchname2 = searchname.split('(', 1)[0]\n if searchname2 != '':\n searchname = searchname2\n wikidataEntries = search_entities(wikidata_site, searchname)\n wikidataEntries_es = search_entities_es(wikidata_site, searchname)\n results = wikidataEntries['search'] + wikidataEntries_es['search']\n prettyPrint(results)\n if results != []:\n # results = wikidataEntries['search']\n # prettyPrint(results)\n numresults = len(results)\n for i in reversed(range(0,numresults)):\n qid = results[i]['id']\n try:\n candidate_item = pywikibot.ItemPage(repo, qid)\n candidate_item_dict = candidate_item.get()\n except:\n print('Huh - no page found')\n\n skip = 0\n try:\n p31 = candidate_item_dict['claims']['P31']\n for clm in p31:\n if 'Q4167410' in clm:\n skip = 1\n except:\n null = 0\n if skip == 1:\n continue\n incat = 0\n try:\n p18 = candidate_item_dict['claims']['P18']\n for clm in p18:\n title = clm.getTarget()\n print(title.title())\n page = pywikibot.Page(commons, title.title())\n test = page.get()\n if '[['+targetcat.title()+']]' in test:\n incat = 1\n continue\n else:\n incat = 2\n if incat == 1:\n continue\n except:\n print('No image found')\n try:\n sitelink = get_sitelink_title(candidate_item_dict['sitelinks']['commonswiki'])\n except:\n try:\n existing_id = candidate_item_dict['claims']['P910']\n print('P910 exists, following that.')\n for clm2 
in existing_id:\n candidate_item = clm2.getTarget()\n candidate_item_dict = candidate_item.get()\n print(candidate_item.title())\n except:\n null = 0\n # Try the sitelink check again\n try:\n sitelink = get_sitelink_title(candidate_item_dict['sitelinks']['commonswiki'])\n except:\n # No existing sitelink found, add the new one\n data = {'sitelinks': [{'site': 'commonswiki', 'title': targetcat.title()}]}\n try:\n if manual:\n print(\"\\n\\n\")\n prettyPrint(candidate_item_dict)\n print(data)\n print('http://www.wikidata.org/wiki/'+qid)\n print('http://commons.wikimedia.org/wiki/'+targetcat.title().replace(\" \",'_'))\n if incat == 1:\n print('Image is in category')\n elif incat == 2:\n print('Image not in category')\n else:\n print('No image')\n # if incat == 1:\n text = input(\"Save? \")\n if text == 'y':\n candidate_item.editEntity(data, summary=u'Add commons sitelink')\n targetcat.touch()\n return 1\n else:\n return 0\n else:\n if incat == 1:\n candidate_item.editEntity(data, summary=u'Add commons sitelink based on label and image')\n return 1\n else:\n return 0\n except:\n print('Edit failed')\n return 0\n return 0\n\nexisting_uses = {}\nif database:\n print('Loading database...')\n with open('commons_wikidata_infobox_uses.csv', mode='r') as infile:\n reader = csv.reader(infile)\n existing_uses = {rows[0] for rows in reader}\n print('Database loaded!')\n\nnummodified = 0\nif usetemplate:\n templates = ['South African Heritage Site']\n template = pywikibot.Page(commons, 'Template:'+templates[0])\n targetcats = template.embeddedin(namespaces='14')\n\n for targetcat in targetcats:\n print(targetcat.title())\n\n if targetcat.title() in existing_uses:\n print('In database')\n continue\n else:\n runimport(targetcat)\nelif usecategory:\n # targetcats = ['Category:Galleries, Libraries, Archives and Museums (GLAM)']\n # targetcats = ['Category:Canary Islands']\n # targetcats = ['Category:Astronomy']\n # targetcats = ['Category:COVID-19 pandemic']\n targetcats = ['Category:Uses of Wikidata Infobox with no item']#'Category:CommonsRoot']\n # targetcats = ['Category:Santa Cruz de Tenerife']\n # targetcats = ['Category:Cultural heritage monuments in Norway with known IDs']#['Category:São Vicente (São Paulo)']\n # New style of category walker\n numchecked = 0\n catschecked = 0\n i = 0\n seen = set(targetcats)\n active = set(targetcats)\n\n while active:\n i+=1\n next_active = set()\n for item in sorted(active):\n cat = pywikibot.Category(commons,item)\n if cat.title() not in existing_uses:\n nummodified += runimport(cat)\n else:\n print('Already in database')\n numchecked += 1\n print(str(nummodified) + \" - \" + str(numchecked) + \"/\" + str(len(seen)) + \"/\" + str(len(active)) + \"/\" + str(len(next_active)))\n\n # See if there are subcategories that we want to check in the future\n if i == 1:\n for result in pagegenerators.SubCategoriesPageGenerator(cat, recurse=False):\n if result.title() not in seen:\n seen.add(result.title())\n next_active.add(result.title())\n temp = list(next_active)\n random.shuffle(temp)\n active = set(temp)\n if nummodified >= maxnum:\n print('Reached the maximum of ' + str(maxnum) + ' entries modified, quitting!')\n break\nelse:\n # Pick random categories\n while nummodified < maxnum:\n targets = pagegenerators.RandomPageGenerator(total=100, site=commons, namespaces='14')\n for target in targets:\n print(target.title())\n if target.title() not in existing_uses:\n nummodified += runimport(target)\n print(nummodified)\n \n if nummodified >= maxnum:\n print('Reached the 
maximum of ' + str(maxnum) + ' entries modified, quitting!')\n break\n\n# EOF","sub_path":"commons_wikidata_search.py","file_name":"commons_wikidata_search.py","file_ext":"py","file_size_in_byte":9321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"179624290","text":"# -*- coding: utf-8 -*-\n\nfrom markdown import Markdown\n\n# The imports in this file are order-sensitive\n\nfrom flask import Flask\nfrom flask.ext.assets import Environment, Bundle\nfrom flask.ext.flatpages import FlatPages\nfrom flask.ext.lastuser import Lastuser\nfrom flask.ext.lastuser.sqlalchemy import UserManager\n\nfrom datetime import datetime\n\nfrom baseframe import baseframe, baseframe_js, baseframe_css\nimport coaster.app\n\n# First, make an app and config it\n\napp = Flask(__name__, instance_relative_config=True)\nlastuser = Lastuser()\npages = FlatPages(app)\n\n# Second, after config, import the models and views\n\nimport hasweb.models\nimport hasweb.views\n\n# Third, setup baseframe and assets\n\napp.register_blueprint(baseframe)\n\nassets = Environment(app)\njs = Bundle(baseframe_js,\n 'js/script.js',\n 'js/jquery.tablesorter.min.js',\n 'js/jquery.textarea-expander.js',\n 'js/showdown.js')\ncss = Bundle(baseframe_css,\n 'css/app.css',\n filters='cssmin', output='css/packed.css')\nassets.register('js_all', js)\nassets.register('css_all', css)\n\n\ndef init_for(env):\n coaster.app.init_app(app, env)\n hasweb.models.db.init_app(app)\n lastuser.init_app(app)\n lastuser.init_usermanager(UserManager(hasweb.models.db, hasweb.models.User))\n\n\n@app.template_filter('age')\ndef age(dt):\n suffix = u\"ago\"\n delta = datetime.utcnow() - dt\n if delta.days == 0:\n # < 1 day\n if delta.seconds < 10:\n return \"seconds %s\" % suffix\n elif delta.seconds < 60:\n return \"%d seconds %s\" % (delta.seconds, suffix)\n elif delta.seconds < 120:\n return \"a minute %s\" % suffix\n elif delta.seconds < 3600: # < 1 hour\n return \"%d minutes %s\" % (int(delta.seconds / 60), suffix)\n elif delta.seconds < 7200: # < 2 hours\n return \"an hour %s\" % suffix\n else:\n return \"%d hours %s\" % (int(delta.seconds / 3600), suffix)\n elif delta.days == 1:\n return u\"a day %s\" % suffix\n else:\n return u\"%d days %s\" % (delta.days, suffix)\n\n\n@app.template_filter('markdown')\ndef to_markdown(text):\n markdown = Markdown(safe_mode=\"escape\").convert\n return markdown(text)\n","sub_path":"hasweb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"596252710","text":"from selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport html2text\r\nimport time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import TimeoutException\r\n\r\n\r\nimport bs4\r\n# CHROME_PATH = 'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe'\r\n# CHROMEDRIVER_PATH = 'C:/Program Files/chromedriver'\r\n# WINDOW_SIZE = \"1920,1080\"\r\n#\r\n# chrome_options = Options()\r\n# chrome_options.add_argument(\"--headless\")\r\n# chrome_options.add_argument(\"--window-size=%s\" % WINDOW_SIZE)\r\n# chrome_options.binary_location = CHROME_PATH\r\n#\r\n# driver = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH,\r\n# chrome_options=chrome_options\r\n# )\r\n# h = html2text.HTML2Text()\r\ndriver = webdriver.Chrome('C:/Program Files/chromedriver')\r\ndelay = 4\r\ndef findingSkills(job_title):\r\n driver.get('https://www.linkedin.com/login')\r\n time.sleep(20)\r\n #job_title=\"fashion designer\" #hardcoded for now\r\n job_split = job_title.split()\r\n\r\n job_title = job_title.replace(' ', '%20')\r\n url = \"https://www.google.dz/search?q=site:linkedin.com/in/%20\"+job_title\r\n driver.get(url)\r\n content = driver.page_source\r\n soup = bs4.BeautifulSoup(content, 'lxml')\r\n links= []\r\n for link in soup.findAll('a'):\r\n if(link.has_attr('href')):\r\n if('https://in.linkedin.com/in/' in link['href']):\r\n links.append(link['href'])\r\n\r\n for i in range(1):\r\n driver.get(links[0])\r\n content = driver.page_source\r\n soup = bs4.BeautifulSoup(content, 'lxml')\r\n\r\n temp = soup.prettify()\r\n with open('some_file.txt', 'w', encoding=\"utf-8\") as f:\r\n f.write(temp)\r\n # print (soup.find(id='bpr-guid-585975'))\r\n # # element = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.CLASS_NAME, 'pv-skill-category-entity__name-text ')))\r\n # # print(element.text)\r\n\r\n\r\n\r\nfindingSkills('Web Developer')","sub_path":"Main/finding_skills.py","file_name":"finding_skills.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"179954225","text":"from utils import *\nfrom predict import *\nimport sys\nimport predict\n#action : F -> Front -> 0, L -> Left -> 1, R -> Right -> 2\n\n\nnb_poids = 34\nlayers = [5, 3, 3]\nW = [ x*0.1 for x in range(nb_poids)]\n\n\nnext_input_must_be(\"START player\")\nplayer = int(input())\nnext_input_must_be(\"STOP player\")\n\nnext_input_must_be(\"START settings\")\nline = input()\nwhile line != \"STOP settings\":\n line = input()\n\ngrid = []\n\nwhile True:\n # Get sensors information\n next_input_must_be(\"START turn\")\n string_sensors = input()\n other_information = input()\n next_input_must_be(\"STOP turn\")\n\n sensors = getSensorsFromString(string_sensors)\n isCrash, score = parse_other_information(other_information)\n print(isCrash, score, file=sys.stderr)\n\n # Send decision\n print(\"START action\")\n print(take_decision(layers, W, sensors))\n print(\"STOP action\")\n","sub_path":"stupideIa.py","file_name":"stupideIa.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"535239070","text":"# ==========================================================\nimport cv2 # \"Biblioteca\" OpenCV\nimport pytesseract # Módulo para a utilização da tecnologia OCR\n\nimport time # \"Biblioteca\" nativa de Python para calcularmos quantos frames por segundo\nfrom PIL import Image # Importando o módulo Pillow para abrir a imagem no script\nfrom _thread import start_new_thread # Threadq\nimport re\n\nfrom threading import Thread\n\n# ==========================================================\nlet_num = {'O': '0', '0': '0', 'I': '1', '1': '1', 'Z': '2', '2': '2', '3': '3', 'A': '4', '4': '4', 'S': '5',\n '5': '5', '6': '6', '7': '7', 'B': '8', '8': '8', '9': '9', 'C': 'C', 'D': 'D', 'E': 'E', 'F': 'F',\n 'G': 'G', 'H': 'H', 'J': 'J', 'K': 'K', 'L': 'L', 'M': 'M', 'N': 'N', 'P': 'P', 'Q': 'Q', 'R': 'R',\n 'T': 'T', 'U': 'U', 'V': 'V', 'W': 'W', 'X': 'X', 'Y': 'Y'}\n\nnum_let = {'0': 'O', '1': 'I', '2': 'Z', '4': 'A', '5': 'S', '8': 'B', 'A': 'A', 'B': 'B', 'C': 'C', 'D': 'D',\n 'E': 'E', 'F': 'F', 'G': 'G', 'H': 'H', 'I': 'I', 'J': 'J', 'K': 'K', 'L': 'L', 'M': 'M', 'N': 'N',\n 'O': 'O', 'P': 'P', 'Q': 'Q', 'R': 'R', 'S': 'S', 'T': 'T', 'U': 'U', 'V': 'V', 'W': 'W', 'X': 'X',\n 'Y': 'Y', 'Z': 'Z', '3': '3', '6': '6', '7': '7', '9': '9'}\n\n\nclass PlateScanner(Thread):\n def __init__(self, thread_id, name, enable_thread=False):\n Thread.__init__(self)\n self.threadID = thread_id\n self.name = name\n self.enableThread = enable_thread\n\n def run(self):\n self.scan()\n\n def scan(self):\n cap = cv2.VideoCapture(0) # 0 - Primeira camera, 1 - segunda camera, ou 'C:/path/para/arquivo/de/video.avi'\n start = time.time()\n\n # while (True): # Loop Infinito\n while (): # Loop Infinito\n\n # Leitura:\n ret, frame = cap.read() # Lendo frame atual\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convertendo o frame (RGB) para tons de cinza\n cv2.imshow('Tons de Cinza', frame_gray) # Mostrando imagem na janela com título \"Tons de Cinza\"\n\n\n # Calcundo quantos frames por segundo (a cada 2 segundos):\n if time.time() - start > 1: # Se já se passaram os \"x\" segundos:\n start = time.time() # Reseta o tempo inicial\n\n if self.enable_thread: # Se pudermos fazer uma nova leitura OCR:\n start_new_thread(self.runOCR, (frame_gray,)) # Executa o OCR em uma nova Thread, para não travarmos a aplicação\n\n\n # Final:\n if cv2.waitKey(1) & 0xFF == ord(\n 'q'): # Se o usuário estiver com a janela aberta e pressionar \"q\", iremos encerrar o programa\n break\n\n # ==========================================================\n cap.release() # Liberando Webcam para o Sistema Operacional\n cv2.destroyAllWindows() # Fechando todas as janelas do OpenCV\n\n # import os\n\n # os._exit(0)\n\n def runOCR(self, frame_gray):\n # global novaThread\n\n ind_num = [3, 5, 6] #índices de caracteres numericos\n ind_let = [0, 1, 2, 4] #índices de caracteres alfabéticos\n\n # novaThread = False\n\n leitura = pytesseract.image_to_string(Image.fromarray(frame_gray), config='-psm 11') # realiza a leitura e a transformação da imagem em string\n\n\n if len(leitura) > 1:\n\n pattern = re.compile(r'[A-Z0-9]{7}') #Define padrão a ser encontrado na string\n matches = pattern.finditer(leitura) #busca o padrão na variável leitura\n\n for match in matches:\n a = match.group(0) #variável que recebe o padrão identificado\n print(\"===========================================\\n\" \"A placa lida é: \", a)\n\n # Correção da leitura:\n placaCorreg = list(a)\n\n aux_let = [num_let[a[i]] for i in ind_let]\n aux_num = 
[let_num[a[i]] for i in ind_num]\n\n for i, j in enumerate(ind_num):\n placaCorreg[j] = aux_num[i]\n\n for i, j in enumerate(ind_let):\n placaCorreg[j] = aux_let[i]\n\n # Resultado:\n a1 = ''.join(placaCorreg)\n print(\"A placa corrigida é: \", a1)\n\n\n# ==========================================================\n\n\"\"\"\nUsage:\n tesseract --help | --help-extra | --help-psm | --help-oem | --version\n tesseract --list-langs [--tessdata-dir PATH]\n tesseract --print-parameters [options...] [configfile...]\n tesseract imagename|imagelist|stdin outputbase|stdout [options...] [configfile...]\n\nOCR options:\n --tessdata-dir PATH Specify the location of tessdata path.\n --user-words PATH Specify the location of user words file.\n --user-patterns PATH Specify the location of user patterns file.\n -l LANG[+LANG] Specify language(s) used for OCR.\n -c VAR=VALUE Set value for config variables.\n Multiple -c arguments are allowed.\n --psm NUM Specify page segmentation mode.\n --oem NUM Specify OCR Engine mode.\nNOTE: These options must occur before any configfile.\n\nPage segmentation modes:\n 0 Orientation and script detection (OSD) only.\n 1 Automatic page segmentation with OSD.\n 2 Automatic page segmentation, but no OSD, or OCR.\n 3 Fully automatic page segmentation, but no OSD. (Default)\n 4 Assume a single column of text of variable sizes.\n 5 Assume a single uniform block of vertically aligned text.\n 6 Assume a single uniform block of text.\n 7 Treat the image as a single text line.\n 8 Treat the image as a single word.\n 9 Treat the image as a single word in a circle.\n 10 Treat the image as a single character.\n 11 Sparse text. Find as much text as possible in no particular order.\n 12 Sparse text with OSD.\n 13 Raw line. Treat the image as a single text line,\n bypassing hacks that are Tesseract-specific.\n\nOCR Engine modes:\n 0 Legacy engine only.\n 1 Neural nets LSTM engine only.\n 2 Legacy + LSTM engines.\n 3 Default, based on what is available.\n\nSingle options:\n -h, --help Show minimal help message.\n --help-extra Show extra help for advanced users.\n --help-psm Show page segmentation modes.\n --help-oem Show OCR Engine modes.\n -v, --version Show version information.\n --list-langs List available languages for tesseract engine.\n --print-parameters Print tesseract parameters.\n\"\"\"","sub_path":"PlateScanner.py","file_name":"PlateScanner.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"175645750","text":"# coding=utf-8\n\n\n\"\"\"\nhttps://gist.github.com/rexzhang/58e80a6e588f0c964c4e9e07385d502b\n\"\"\"\n\n\nfrom __future__ import print_function, unicode_literals, absolute_import\n\nimport re\nimport logging\nimport inspect\n\nfrom rest_framework import exceptions\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.decorators import (\n renderer_classes,\n)\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.renderers import StaticHTMLRenderer\nfrom rest_framework.response import Response\n\nfrom .drf_api_error_code import DrfApiErrorCode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DrfApiGenericViewSet(GenericViewSet):\n\n # ----------------------------------------------------------\n # View 代码简化\n def is_request_data_complete(self, request_param_list, method='GET'):\n \"\"\"request 参数如果有缺失,直接抛异常\"\"\"\n if method == 'GET':\n request_param_data = self.request.query_params\n else:\n request_param_data = self.request.data\n\n for param in request_param_list:\n if param not in request_param_data:\n raise DrfApiError(\n error_code=DrfApiErrorCode.API_PARAM_MISS,\n error_message_ext=\"param miss:{}\".format(param)\n )\n\n def get_params_data(self, required_param_key_list=None, raise_exception=True):\n \"\"\"获取 request 的数据,无论 GET 还是 POST 方法, 如果缺失任何必要参数,可选抛异常\"\"\"\n if self.request.method == 'GET':\n request_params_data = self.request.query_params\n else:\n request_params_data = self.request.data\n\n if required_param_key_list is not None:\n miss_param_key_set = set(required_param_key_list) - set(request_params_data)\n if len(miss_param_key_set) >= 1 and raise_exception:\n raise DrfApiError(\n error_code=DrfApiErrorCode.API_PARAM_MISS,\n error_message_ext=\"param miss:{}\".format(miss_param_key_set)\n )\n\n return request_params_data\n\n def get_object_by_pk(self, pk=None, raise_exception=False):\n \"\"\"根据给定的 pk 获取对应的 object, 如果未找到(包含不存在/没有权限找到/不属于自己)可以选择抛出异常\"\"\"\n try:\n obj = self.get_queryset().filter(id=pk).first()\n except ValueError:\n if raise_exception:\n raise exceptions.NotFound()\n else:\n return None\n\n if obj is None and raise_exception:\n raise exceptions.NotFound()\n\n return obj\n\n @staticmethod\n def raise_api_exception(error_code, error_message_ext=None):\n \"\"\"生成异常 error 信息,并抛出异常\"\"\"\n logger.warning('API Error, code:{} name:{} message_ext:{}'.format(\n error_code, error_code.name, error_message_ext\n ))\n raise DrfApiError(error_code=error_code, error_message_ext=error_message_ext)\n\n # ----------------------------------------------------------\n # 开发阶段工具\n\n @staticmethod\n def get_django_choices_info(choices, is_display_desc=False):\n \"\"\"获取 Django choices 类型信息,主要用于开发辅助接口\"\"\"\n choices_list = []\n if is_display_desc:\n for name, desc in choices:\n choices_list.append({\n 'name': name,\n 'desc': desc,\n })\n\n else:\n for item in choices:\n choices_list.append(item[0])\n\n return choices_list\n\n\n# TODO: 需要重新设计, 尝试与 DRF 风格一致\n# error_message_ext ==> detail ??\n# code:\n# number\n# name\nclass DrfApiError(APIException):\n status_code = 400\n default_detail = 'drf_api extend error info'\n default_code = 'drf_api_extend_error'\n\n def __init__(self, error_code, error_message_ext=None):\n self.detail = self.get_error_detail_info(error_code=error_code, error_message_ext=error_message_ext)\n return\n\n @staticmethod\n def get_error_detail_info(error_code, error_message_ext):\n \"\"\"生成 error detail 信息\"\"\"\n error_detail = {\n 
'detail': 'drf_api extend error',\n 'error': {\n 'code': error_code.value,\n 'name': error_code.name,\n }\n }\n if error_message_ext is None:\n error_detail['error']['message_ext'] = None\n else:\n error_detail['error']['message_ext'] = '{}'.format(error_message_ext)\n\n return error_detail\n\n\nclass DrfApiErrorCodeViewSet(GenericViewSet):\n permission_classes = [IsAuthenticated, ]\n\n def __init__(self, drf_api_error_code_class, **kwargs):\n super(DrfApiErrorCodeViewSet, self).__init__(**kwargs)\n\n self.drf_api_error_code_list = list(drf_api_error_code_class)\n self.drf_api_error_code_source = inspect.getsource(drf_api_error_code_class).decode('utf-8')\n\n self.drf_api_error_code_number_length = 8\n self.drf_api_error_code_info_list = []\n return\n\n @staticmethod\n def get_error_message_string(error_code, error_code_source):\n f = re.findall(r'{} .+\\n'.format(error_code), error_code_source)\n if len(f) >= 1:\n error_message = f[0]\n\n m = re.match(r'{} # (?P.+)\\n'.format(error_code), error_message)\n error_message = m.group('error_message')\n else:\n error_message = ''\n\n return error_message\n\n @renderer_classes((StaticHTMLRenderer, ))\n def list(self, request):\n \"\"\"[工具接口]列出 error_code\"\"\"\n error_info_list = []\n drf_api_check_fail_info_list = []\n\n for api_error in self.drf_api_error_code_list:\n error_info = {\n 'code': api_error.value,\n 'name': api_error.name,\n 'message': self.get_error_message_string(\n error_code=api_error.value,\n error_code_source=self.drf_api_error_code_source\n ),\n }\n if len('{}'.format(api_error.value)) != self.drf_api_error_code_number_length:\n drf_api_check_fail_info_list.append({\n 'code': api_error.value,\n 'message': 'error code length no match',\n })\n\n error_info_list.append(error_info)\n\n return Response({\n 'drf_api_check_fail_info_list': drf_api_check_fail_info_list,\n 'error_info_list': error_info_list,\n })\n","sub_path":"app/ddmon_core/runtimes/drf_api_base_class.py","file_name":"drf_api_base_class.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"344382110","text":"import pandas as pd\n\ndef load(path):\n df = pd.read_csv(path,\n header=None,\n names=['day', 'time', 'price', 'bid', 'ask', 'vol'])\n df['date'] = pd.to_datetime(df['day'] + df['time'],\n format='%m/%d/%Y%H:%M:%S')\n df = df.set_index('date')\n df = df.drop(['day', 'time', 'bid', 'ask'],\n axis=1)\n df = df.drop_duplicates()\n return df\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"30155258","text":"import rdkit\nimport os.path as osp\nimport graph_conv_many_nuc_util\nfrom graph_conv_many_nuc_util import move\nimport argparse\nimport torch\nfrom torch.nn import Sequential, Linear, ReLU\nimport torch.nn.functional as F\nfrom torch_scatter import scatter_mean\nfrom torch_geometric.datasets import QM9\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import NNConv\nimport sys\nfrom loader_processing import process\nimport loss_functions\n\ninfile = '/scratch/aqd215/k-gnn/nmr_shift_data/graph_conv_many_nuc_pipeline.datasets/graph_conv_many_nuc_pipeline.data.13C.nmrshiftdb_hconfspcl_nmrshiftdb.aromatic.64.0.mol_dict.pickle'\n \ntrain_loader, test_loader = process(infile)\n\nprint('train loaders in 1-nmr')\nsys.stdout.flush()\nprint(train_loader)\nsys.stdout.flush()\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n M_in, M_out = 37, 128\n nn1 = Sequential(Linear(4, 128), ReLU(), Linear(128, M_in * M_out))\n self.conv1 = NNConv(M_in, M_out, nn1)\n\n M_in, M_out = M_out, 256\n nn2 = Sequential(Linear(4, 128), ReLU(), Linear(128, M_in * M_out))\n self.conv2 = NNConv(M_in, M_out, nn2)\n\n M_in, M_out = M_out, 512\n nn3 = Sequential(Linear(4, 128), ReLU(), Linear(128, M_in * M_out))\n self.conv3 = NNConv(M_in, M_out, nn3)\n\n self.fc1 = torch.nn.Linear(512, 256)\n self.fc2 = torch.nn.Linear(256, 128)\n self.fc3 = torch.nn.Linear(128, 64)\n\n def forward(self, data):\n x = data.x\n x = F.elu(self.conv1(x, data.edge_index, data.edge_attr))\n x = F.elu(self.conv2(x, data.edge_index, data.edge_attr))\n x = F.elu(self.conv3(x, data.edge_index, data.edge_attr))\n\n x = scatter_mean(x, data.batch, dim=0)\n\n x = F.elu(self.fc1(x))\n x = F.elu(self.fc2(x))\n x = self.fc3(x)\n return x.unsqueeze(-1)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net().to(device)\n\n# changed starting lr to 0.01, min_lr to 0.000001 (wider range)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.7, patience=3, min_lr=0.000001)\n\n\ndef train(epoch):\n model.train()\n loss_all = 0\n total = 0\n\n # note that the number of atoms exceeds the number of carbons, and therefore there will be many zeros\n for i, data in enumerate(train_loader):\n data = data.to(device)\n optimizer.zero_grad()\n target = torch.FloatTensor(data.y).to(device)\n mask = torch.FloatTensor(data.mask).to(device)\n loss = loss_functions.MSE_loss(model(data), target, mask)\n loss.backward()\n loss_all += loss\n optimizer.step()\n total += 1\n return float(loss_all) / total\n\n\ndef test(loader):\n model.eval()\n error = 0\n total = 0\n\n for data in loader:\n data = data.to(device)\n target = torch.FloatTensor(data.y).to(device)\n mask = torch.FloatTensor(data.mask).to(device)\n error += loss_functions.MAE_loss(model(data), target, mask) # MAE\n total += 1\n return float(error) / total\n\nfor epoch in range(1, 301):\n lr = scheduler.optimizer.param_groups[0]['lr']\n avg_train_loss = train(epoch)\n test_error = test(test_loader)\n scheduler.step(test_error)\n\n print('Epoch: {:03d}, LR: {:7f}, Loss: {:.7f}, Test MAE: {:.7f}'.format(epoch, lr, avg_train_loss, test_error))\n sys.stdout.flush()\n","sub_path":"nmr_shift_data/models/1-nmr-changedLR.py","file_name":"1-nmr-changedLR.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"500973800","text":"import threading\nfrom time import ctime, sleep\n\n\na = 10\n\n\ndef music(sec):\n print(\"Listening music...\")\n global a\n a = 1000\n sleep(sec)\n\n\nt = threading.Thread(target=music, name=\"my thread\", args=(2,))\nt.start()\nprint(\"创建线程\", ctime())\nsleep(3)\nprint('a的值为:', a)\nt.join()\n","sub_path":"aid1805/Process/day04/thread01.py","file_name":"thread01.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"517355488","text":"import requests\nfrom datetime import timedelta\n\nfrom homeassistant.const import CONF_URL, TEMP_CELSIUS\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.util import Throttle\n\n# https://home-assistant.io/developers/platform_example_sensor/\n# https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/binary_sensor/nest.py\n# https://github.com/cyberjunky/home-assistant-custom-components/blob/master/sensor/solarportal.py\n\nSENSOR_PREFIX = 'Trash '\nTRASH_TYPES = [{1: \"rest\"}, {3: \"gft\"}, {87: \"papier\"}]\nMIN_TIME_BETWEEN_UPDATES = timedelta(hours=1)\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n\n # \"\"\"Setup the sensor platform.\"\"\"\n url = config.get(CONF_URL)\n data = TrashCollectionSchedule(url, TRASH_TYPES)\n\n devices = []\n for trash_type in TRASH_TYPES:\n for t in trash_type.values():\n devices.append(TrashCollectionSensor(t, data))\n add_devices(devices)\n\n\nclass TrashCollectionSensor(Entity):\n \"\"\"Representation of a Sensor.\"\"\"\n\n def __init__(self, name, data):\n \"\"\"Initialize the sensor.\"\"\"\n self._state = None\n self._name = name\n self.data = data\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return SENSOR_PREFIX + self._name\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._state\n\n def update(self):\n \"\"\"Fetch new state data for the sensor.\n\n This is the only method that should fetch new data for Home Assistant.\n \"\"\"\n self.data.update()\n for d in self.data.data:\n if d['shortcode'] == self._name:\n self._state = d['pickup_date']\n\n\nclass TrashCollectionSchedule(object):\n\n def __init__(self, url, trash_types):\n self._url = url\n self._trash_types = trash_types\n self.data = None\n\n @Throttle(MIN_TIME_BETWEEN_UPDATES)\n def update(self):\n response = requests.get(self._url)\n content = response.json()\n tschedule = []\n for item in content:\n for id in self._trash_types:\n if int(item['id']) in id.keys():\n trash = {}\n trash['shortcode'] = (next(iter(id.values())))\n trash['id'] = item['id']\n trash['pickup_date'] = item['ophaaldatum']\n tschedule.append(trash)\n self.data = tschedule\n","sub_path":"custom_components/gpd/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"458489961","text":"import random as rand\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Perceptron:\n def __init__(self, **kwargs):\n self.weights = kwargs.get(\"weight_vector\", None)\n self.learn_step = kwargs.get(\"learn_step\", 0.5)\n self.draw = kwargs.get(\"draw\", False)\n self.log = kwargs.get(\"log\", False)\n self.adaline = kwargs.get(\"adaline\", False)\n self.theta = kwargs.get(\"theta\", 0)\n self.bipolar = kwargs.get(\"bipolar\", False)\n input_num = kwargs.get(\"input_num\", 2)\n self.teaching_iterations = 0\n if self.weights is None:\n self.weights = []\n for i in range(input_num + 1):\n self.weights.append(rand.random() * 2 - 1)\n\n def show(self):\n for i in self.weights:\n print(i)\n\n def net(self, input_vector):\n result = 0\n for (x, w) in zip([1] + list(input_vector), self.weights):\n result += x * w\n return result\n\n def output(self, input_vector):\n result = self.net(input_vector)\n return 1 if result > self.theta else -1 if self.bipolar else 0\n\n def output_set(self, input_set):\n return [(x[-1], self.output(x)) for x in input_set]\n\n def error(self, input_vector, result):\n if self.adaline:\n err = self.error_adaline(input_vector, result)\n else:\n err = result - self.output(input_vector)\n return err\n\n def error_adaline(self, input_vector, result):\n net = self.net(input_vector)\n out = self.output(input_vector)\n return 0 if out is result else result - net\n\n def effectiveness(self, input_set):\n eff = 0\n for (x, y) in self.output_set(input_set):\n if x == y:\n eff += 1\n return eff / len(input_set)\n\n def teach(self, teaching_set, iterations):\n if self.draw:\n plt.hold(True)\n self.print_set(teaching_set)\n if self.log:\n print(self.weights)\n for i in range(iterations):\n correct = 0\n\n for row in teaching_set:\n input_vector = row[:-1]\n e = self.error(input_vector, row[-1])\n if e is 0:\n correct += 1\n new_weights = []\n for w, x in zip(self.weights, [1] + list(input_vector)):\n new_weights.append(w + x * self.learn_step * e)\n self.weights = new_weights\n if self.log:\n print(self.weights)\n print(\"Effectiveness: \" + str(correct / len(teaching_set)))\n if correct / len(teaching_set) > 0.99:\n break\n if self.draw:\n self.draw_function()\n self.teaching_iterations = i\n if self.draw:\n plt.show()\n\n def draw_function(self):\n [w0, w1, w2] = self.weights\n a = -w1 / w2\n b = -w0 / w2\n x = np.arange(0, 1.1, 0.1)\n plt.plot(x, a * x + b)\n\n def print_set(self, t_set):\n plt.plot([x for (x, y, z) in t_set if z is 1], [y for (x, y, z) in t_set if z is 1], 'go')\n plt.plot([x for (x, y, z) in t_set if z is not 1], [y for (x, y, z) in t_set if z is not 1], 'ro')\n\n\ndef gen1(bipolar=False):\n while True:\n (x, y) = (rand.random(), rand.random())\n yield (x, y, 1 if x + y > 1 else -1 if bipolar else 0)\n\n\ndef take(n, gen):\n return list(next(gen) for _ in range(n))\n\n\ndef test1(gens=150, max_iter=500, tset_size=90, test_set=None, **kwargs):\n test = []\n for i in range(gens):\n test.append(Perceptron(**kwargs))\n\n # teaching_set = take(tset_size, gen1())\n avg_iter = 0\n avg_eff = 0\n for perc in test:\n perc.teach(take(tset_size, gen1()), max_iter)\n avg_iter += perc.teaching_iterations\n if test_set is not None:\n avg_eff += perc.effectiveness(test_set)\n avg_iter /= len(test)\n if test_set is not None:\n avg_eff /= len(test)\n return [avg_iter, avg_eff]\n\n\ndef test_step(adaline=False, bipolar=False):\n for i in np.arange(0.1, 1.1, 0.1):\n print(\"Step: \" + str(i) + \" \" + str(\n test1(learn_step=i, gens=50, 
max_iter=150, adaline=adaline, bipolar=bipolar)[0]))\n\n\ndef test_tset_size():\n test_set1 = take(1000, gen1())\n for i in range(10, 311, 50):\n print(\"Tset size: \" + str(i))\n results = test1(step=0.5, log=False, gens=100, max_iter=100, tset_size=i, test_set=test_set1, adaline=True)\n print(\"Avg_iter: \" + str(results[0]) + \" Avg_eff: \" + str(results[1]))\n\n\ndef test_basic():\n p1 = Perceptron(draw=True, log=True, learn_step=0.5, weight_vector=[0, -1, 1], bipolar=True)\n tset = take(90, gen1(True))\n p1.teach(tset, 50)\n\n\ntest_basic()\n","sub_path":"Perceptron.py","file_name":"Perceptron.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"318461135","text":"import sys\nour_list = []\n\nprint('Insert number')\nour_number = int(sys.stdin.readline())\nour_string = str(our_number)\n\nour_summa = 0\nfor i in our_string:\n our_list.append(i)\n our_summa += int(i)\n\nprint(\"sum of digits = \",our_summa)\n\nour_list.reverse()\nrevers_number = int(\"\".join([str(l) for l in our_list]))\nprint(\"revers of our number = %d\"%(revers_number))\n\nour_list.sort()\nsort_number = int(\"\".join([str(l) for l in our_list]))\nprint(\"sort of our number = %d\"%(sort_number))\n","sub_path":"Hw3/gyshpak/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"2627001","text":"import pandas as pd\nimport numpy as np\nimport csv\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.externals import joblib\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n# 0: left, 1: down, 2: right, 3: up\n\ncsv_data = pd.read_csv('Train.csv')\ncsv_data = csv_data.values\nboard_data = csv_data[:,0:16]\nprint(board_data[0:4,:])\ndirection_data = csv_data[:,16]\n\n# print(type(board_data))\nX = np.int32(board_data)/11.0\nY = np.int32(direction_data)\nprint(Y.shape)\nX_train, X_test, Y_train, Y_test = train_test_split(X[::10], Y[::10], test_size=0.3)\n# clf = KNeighborsClassifier(n_neighbors=5, n_jobs=8)\n# clf.fit(X_train, Y_train)\n# train_accuracy = clf.score(X_train, Y_train)\n# test_accuracy = clf.score(X_test, Y_test)\n\nk_range = range(1,31)\nk_scores = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k, n_jobs=8)\n knn.fit(X_train, Y_train)\n scores = knn.score(X_test, Y_test)\n k_scores.append(scores)\n\nplt.plot(k_range, k_scores)\nplt.xlabel('KNN_value')\nplt.ylabel('accuracy')\nplt.show()\n\n\n# print('Training accuracy: %0.2f%%' % (train_accuracy*100))\n# print('Testing accuracy: %0.2f%%' % (test_accuracy*100))\n# joblib.dump(clf, 'KNN_3_model.pkl')","sub_path":"game2048/KNN_training.py","file_name":"KNN_training.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"406463575","text":"import requests\nfrom nextcord.ext import commands\nfrom nextcord.message import Message\nfrom src.bot.bot import Bot\n\n\nclass Owner(commands.Cog):\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name='setPermissions', hidden=True, aliases=['setp'])\n @commands.is_owner()\n async def setPermissions(self, ctx: commands.Context, arg: str, id: int, rights: int):\n # Change a users rights or guilds rights\n arg = arg.lower().strip(' <>!@')\n if arg in ['userrights', 'u_permissions', 'u_p']:\n requests.patch(f'{self.bot.base_api_url}discord/user/',\n params={'id': ctx.guild.id, 'privileges': rights}, headers=self.bot.header)\n await ctx.send(f'Set user privileges level for {id} to {rights}')\n elif arg in ['guildrights', 'g_permissions', 'g_p']:\n requests.patch(f'{self.bot.base_api_url}discord/guild/',\n params={'id': ctx.guild.id, 'privileges': rights}, headers=self.bot.header)\n await ctx.send(f'Set guild privileges for {id} to {rights}')\n\n @commands.command(name='setStatus', hidden=True)\n @commands.is_owner()\n async def set_status(self, ctx: commands.Context, _type: str = '', message: str = ''):\n if not _type:\n await ctx.send('Choose between '\n '\\'s\\' - streaming, '\n '\\'p\\' - playing, '\n '\\'w\\' - watching and '\n '\\'l\\' - listening to')\n elif not message:\n await ctx.send('You have to choose a message after the selected type of status.')\n else:\n await self.bot.set_status(_type, message)\n await ctx.message.add_reaction('🐸')\n\n @commands.command(name='guilds', hidden=True)\n @commands.is_owner()\n async def _guilds(self, ctx: commands.Context, arg: str = ''):\n if arg == '':\n await ctx.send(f'I am currently in {len(self.bot.guilds)} guilds.')\n elif arg == 'new':\n await ctx.send(f'This function is currently being built...')\n elif arg == 'voice':\n await ctx.send('All guilds I am currently connected to:')\n message = ''\n n = 50\n for i, guild_id in enumerate(self.bot.voice_states):\n guild = self.bot.get_guild(guild_id)\n message += f'{i+1}. \\'{guild.name}\\', {guild.member_count}\\n'\n\n if i > n:\n n += 50\n await ctx.send(f'```{message}```')\n message = ''\n if message:\n await ctx.send(f'```{message}```')\n else:\n await ctx.send('All guilds I am currently in:')\n message = ''\n n = 50\n for i, guild in enumerate(sorted(self.bot.guilds, key=lambda guild: guild.member_count, reverse=True)):\n message += f'{i+1}. \\'{guild.name}\\', {guild.member_count}\\n'\n\n if i > n:\n n += 50\n await ctx.send(f'```{message}```')\n message = ''\n if message:\n await ctx.send(f'```{message}```')\n","sub_path":"src/cogs/owner.py","file_name":"owner.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"360723015","text":"#!/usr/bin/env python\n\"\"\" suite builder example for 2013 ecFlow course \"\"\"\n# file deepcode ignore missing~close~open: legacy ignore\nimport sys, os\nsys.path.append('/home/ma/emos/def/ecflow')\n\nimport ecf as ec\nfrom ecf import *\nimport inc_emos as ie # provides Seed class + system related dependencies \n\n# cd ~map/course/201303/ecflow; python course.py # task wrappers underneath\n# consume: choices for a family matching the producer-consumer pattern\n# local family + remote family + BEWARE ECF_OUT + log-server example\n# barber: an example of a \"dynamical suite\", with a \"family producer task\"\n# perl + python: example that task wrapper may not be ksh/bash scripts\n\nimport time\nfrom datetime import date\nimport argparse\n\ntoday = str(date.today()).replace('-', '')\n\n############################################################################\n\nclass GenericFamily(object):\n \"\"\" provide structure for derived classes\"\"\"\n\n def make(self, node): \n return node\n def main(self, node): \n return BaseException # return node\n def arch(self, node): \n return node\n\n############################################################################\n\nclass NativePerl(GenericFamily):\n \"\"\" ksh is not the only language for task wrappers\"\"\"\n\n def __init__(self):\n self.name = \"perl\"\n\n def main(self, node):\n tsk = Task(self.name).add(\n Variables(ECF_MICRO= \"^\",\n ECF_JOB_CMD= \"^ECF_JOB^ > ^ECF_JOBOUT^ 2>&1\"),\n Meter(\"step\", -1, 100),\n Event(\"1\"),\n Event(\"2\"),\n Label(\"info\", \"none\"), \n )\n node.add(tsk)\n\n############################################################################\n\nclass NativePython(NativePerl):\n \"\"\" ksh is not the only language for task wrappers\"\"\"\n\n def __init__(self):\n super(NativePerl, self).__init__()\n self.name = \"python\"\n\n############################################################################\n\ndef _kind(prod=1, cons=1): return Variables(CONSUME= cons, PRODUCE=prod)\ndef _evt(): return (Event(\"p\"), Event(\"c\"))\ndef _leaf(name=\"produce\", init=0, stop=100, step=1):\n add = None\n if type(stop) == int:\n add = Meter(\"step\", -1, int(stop))\n return Task(\"%s\" % name).add(\n _evt(),\n add,\n Variables(INIT= init,\n STOP= stop,\n STEP= step),) \n\n############################################################################\nclass FallBack(GenericFamily): \n \"\"\" in some situation, user may want its family to continue, and\n repeat to increment, even when some tasks abort \"\"\"\n \n def main(self, node=None):\n return Family(\"daily\").repeat(name=\"YMD\", \n start=today, end=DATE_STOP).add(\n Task(\"action\").add(Time(\"10:00\")),\n Family(\"loop\").add(\n Time(\"11:00\"),\n Task(\"dummy\").add(\n TriggerImpossible(),\n Complete(\"1==1\"))),\n Task(\"fallback\").add(\n Label(\"info\", \"force complete when action is aborted\"),\n Time(\"10:55\"),\n Trigger(\"action eq aborted\"),\n Complete(\"action eq complete\")))\n\nclass DailyInc(GenericFamily): \n \"\"\" anopther method to have daily repeat increment, with aborted tasks\"\"\"\n\n def main(self, node=None):\n return Family(\"daily_inc\").repeat(name=\"YMD\", \n start=today, end=DATE_STOP).add(\n Label(\"info\", \"requeue will reset repeat attribute!\"),\n Complete(\"daily_inc/loop/dummy eq complete\"),\n Task(\"action\").add(Time(\"10:00\")),\n Family(\"loop\").add(\n Time(\"11:00\"),\n Task(\"dummy\").add(TriggerImpossible(),\n Complete(\"1==1\")))) \n\nclass Consume(GenericFamily): \n \"\"\" 
producer-consumer pattern can be implemented in many ways\"\"\"\n\n def __init__(self):\n self.init = 0\n self.stop = 48\n self.step = 3\n\n def main(self, node):\n \"\"\" pass the parent node, so that absolute paths can be used \n with triggers\"\"\" \n path = node.fullname()\n\n top = node.family(\"consume\").add(\n Variables(SLEEP= 10,\n PRODUCE= 1, # default: tasks will do both\n CONSUME= 1),\n Family(\"limits\").add(Defcomplete(),\n Limit(\"consume\", 7)),\n Task(\"leader\").add(\n Label(\"info\", \"set event to get produce1 leader\"),\n Event(\"1\"), # set/cleared by user\n Defcomplete()),\n\n # task does both, ie serial ###########################\n _leaf(\"produce\", self.init, self.stop, self.step).add(\n Label(\"info\", \"both produce and consume in a task\")),\n\n # meter will report about producer progress ###########\n Family(\"produce0\").add(\n Label(\"info\", \"only produce\"),\n _kind(1, 0),\n _leaf(\"produce\", self.init, self.stop, self.step)),\n\n # serialy produced, create a new task for each step ###\n Family(\"produce1\").add(\n _kind(1, 0),\n Label(\"info\", \"repeat, one job per step\"),\n _leaf(\"produce\", init=\"%STEP%\", stop=\"%STEP%\", step=1).add(\n Meter(\"step\", -1, 100)))\\\n .repeat(kind=\"integer\", name=\"STEP\",\n start=self.init,\n end =self.stop,\n step =self.step).add()\n )\n\n top.defstatus(\"suspended\")\n fam = Family(\"produce2\").add( # parallel\n _kind(1, 0),\n Label(\"info\", \"limited, one task per step, step by 3\"),\n Limit(\"prod\", 5),\n InLimit(\"produce2:prod\"))\n top.add(fam)\n for step in xrange(self.init, self.stop, self.step):\n fam.add(Family(\"%02d\" % step).add(\n Variables(STEP= step),\n _leaf(\"produce\", step, step, 1)))\n \n ######################\n lead = path + \"/consume/leader:1\"\n prod = path + \"/consume/produce\"\n \n top.add( ### trigger may be inside a task\n _leaf(\"consume\", self.init, self.stop, self.step).add(\n Label(\"info\", \"trigger may be inside a task\"),\n _kind(0, 1),\n InLimit(\"limits:consume\"),\n Variables(CALL_WAITER= 1,\n SLEEP= 3, # sleep less than producer\n TRIGGER_EXPRESSION= prod + \":step ge $step or \" +\n prod + \" eq complete\",)),\n Family(\"consume1\").add(\n Label(\"info\", \"explicit trigger, follow faster\"),\n _kind(0, 1),\n Trigger(\"(consume1:STEP lt %s1:STEP and %s) or \" % \n (prod, lead) + \n \"(consume1:STEP lt %s0/produce:step and not %s) or \" % \n (prod, lead) + # lt while both are repeat\n \"(%s1 eq complete and %s0 eq complete)\" %\n (prod, prod)\n ),\n InLimit(\"limits:consume\"),\n _leaf(\"consume\", \"%STEP%\", \"%STEP%\", 1),\n ).repeat(kind=\"integer\", name=\"STEP\", \n start=self.init, end=self.stop, step=self.step))\n \n fam = Family(\"consume2\").add( # parallel\n Label(\"info\", \"one task per step, step by three\"),\n _kind(0, 1),\n Limit(\"consume\", 5),\n InLimit(\"consume2:consume\"))\n top.add(fam)\n for step in xrange(self.init, self.stop, self.step):\n fam.add(Family(\"%02d\" % step).add(\n Variables(STEP = step),\n Trigger(\"(%02d:STEP le %s1:STEP and %s) or \" %\n (step, prod, lead) + \n \"(%02d:STEP le %s0/produce:step and not %s)\" %\n (step, prod, lead)),\n _leaf(\"consume\", init=step, stop=step, step=1)))\n\n############################################################################\n\nclass Barber(GenericFamily):\n \"\"\" a 'barber shop' example with families created by a task \"\"\"\n\n def _passer_by(self):\n \"\"\" generator \"\"\"\n return Task(\"passby\").add(\n Time(\"00:00 23:59 00:05\"),\n Variables(ID=0),\n Label(\"info\", \"\"),\n 
Label(\"rem\", \"this task alters its variable ID, \" +\n \"aliases won't work natively\"),\n InLimit(\"limits:passby\"))\n \n def _client(self, node, position):\n \"\"\" python version of the family created initialy\n attention: raw definition file is located in passby task wrapper\"\"\"\n path = node.fullname() + \"/limits\"\n fam = node.family(\"list\").family(\"%s\" % position).add(\n AutoCancel(1),\n Task(\"cut\").inlimit(path + \":barbers\"),\n Task(\"pay\").add(\n Trigger(\"cut eq complete\"),\n InLimit(path + \":barbers\"),\n InLimit(path + \":cashiers\")),\n Task(\"leave\").add(\n Label(\"info\", \"\"),\n Trigger([\"cut\", \"pay\"])))\n \n fam.defstatus(\"complete\")\n\n def _shop(self, node):\n fam = node.family(\"shop\").defstatus(\"suspended\").add(\n Variables(NB_CHAIRS= 4),\n Family(\"limits\").add(Defcomplete(),\n Limit(\"passby\", 1),\n Limit(\"barbers\", 2),\n Limit(\"cashiers\", 1)),\n self._passer_by(),\n )\n self._client(fam, 1),\n \n\n def main(self, node):\n self._shop(node)\n\n############################################################################\ndef user(): \n return os.getlogin()\n\ndef locate_scripts():\n pwd = os.getcwd()\n\n return Variables(\n ECF_HOME= \"/tmp/%s/ecflow/\" % user(), # pwd,\n ECF_FILES= pwd + \"/scripts\",\n ECF_INCLUDE= pwd + \"/include\", )\n\nDATE_STOP = 20300115\n\nclass Course(ie.Seed):\n \"\"\" host families together \"\"\"\n\n def __init__(self):\n super(Course, self).__init__()\n self.name = \"course\"\n\n def suite(self):\n \"\"\" define limits (empty) \"\"\"\n \n node = Suite(user())\n node.defstatus(\"suspended\").add(\n Variables(USER= user()),\n locate_scripts()) # \n self.top(node)\n \n fp = open(\"/tmp/%s/\" % user() + self.name + \".def\", \"w\")\n print >> fp, node\n return node\n\n def top(self, node):\n barber_shop = Barber()\n perl = NativePerl()\n python = NativePython()\n consume = Consume()\n\n with node.family(self.name) as node:\n node.add(FallBack().main(),\n DailyInc().main())\n barber_shop.main(node)\n perl.main(node)\n python.main(node)\n consume.main(node)\n return node\n\n###############################################################################\n\nclass Admin(Course):\n \"\"\" host newlog task + logsvr start/stop/check task\n -- server logs can be renewed with a ecflowview menu command also\n \"\"\"\n\n def __init__(self):\n self.name = \"admin\"\n\n def top(self, node):\n with node.family(\"admin\") as node:\n node.add(self.main()).repeat(name=\"YMD\", start=today, end=DATE_STOP)\n node.defstatus(\"suspended\")\n return node\n\n def main(self): \n \"\"\" return self contained Family/Task, without absolute node references\n or with relative path triggers\"\"\"\n remote_submit = \"rsh -l %USER% %HOST% %ECF_JOB% > %ECF_JOBOUT% 2>&1\"\n logpath = \"/home/ma/map/course/201303/ecflow\"\n return (\n Task(\"newlog\").add(\n Label(\"info\", \"renew server log-file\"),\n Time(\"08:00\")),\n \n Task(\"logsvr\").add(\n Defcomplete(),\n Variables(HOST= \"pikachu\",\n ECF_LOGPORT=9316,\n ECF_LOGPATH= logpath,\n ECF_LOGMAP= logpath + \":\" + logpath,\n ECF_JOB_CMD= remote_submit), \n Label(\"info\", \"(re)start the logsvr on HOST\"),\n Time(\"08:00\")),\n\n Family(\"loop\").add(\n Time(\"08:30\"),\n Family(\"dummy\").add(# TriggerImpossible(),\n Complete(\"1==1\"))))\n\n###############################################################################\n\nclass EcEvents(Admin):\n \"\"\" connecting to third party software as event generator to update\n a suite variable, and enable daily family run\n \"\"\"\n\n def 
top(self, node):\n node = node.family(\"ecevents\")\n node.add(\n Label(\"info\", \"use web... menu\"),\n Defcomplete(),\n Variables(\n URL= \"http://eccmd.ecmwf.int:8090/#Mainpanel\",\n ECF_URL_CMD= \"${BROWSER:=firefox} -remote 'openURL(%URL%\"))\n self.main(node)\n return node\n \n def main(self, node): \n for mode in [\"list\", \n \"register\", \"delete\", \n \"register_all\", \"delete_all\"]:\n added = None\n if \"list\" in mode:\n added = Label(\"regs\", \"\")\n\n fam = Family(mode).add(\n Variables(MODE= mode),\n Task(\"ecjobs\").add(Label(\"info\", \"none\"), \n added))\n node.add(fam)\n if \"_all\" in mode:\n fam.add(Variables(EVENT= \"_all_\"))\n elif mode in (\"register\", \"delete\"):\n fam.add(Variables(EVENT= \"an12h000\"),\n Label(\"info\", \"update EVENT variable\"))\n\n event = \"an00h000\"\n node.family(\"ref\").defstatus(\"complete\").add(\n Task(event).add(Variables(YMD= today),\n Label(\"YMD\", today)))\n\n node.family(\"user\").family(event).repeat(\n name=\"YMD\", start=today, end=DATE_STOP).add(\n Label(\"info\", \"extern cannot be used anymore for \" +\n \"intra-suite reference triggers\"),\n Variables(SLEEP= 1),\n Trigger(event + \":YMD le %s/ref/%s:YMD\" % (node.fullname(), event)),\n _leaf(\"consume\"))\n return node\n\n###############################################################################\n\nclass SerialTask(object):\n \"\"\" add trigger on the previous task \"\"\"\n\n def __init__(self):\n self.prev = None\n\n def add(self, name):\n fam = Family(name).add(\n Variables(MODE= name),\n Task(\"to_ecflow\"))\n if self.prev is not None:\n fam.add(Trigger(\"./%s eq complete\" % self.prev))\n self.prev = name\n return fam\n\nclass Reload(Admin):\n \"\"\" a simple task to download SMS content and translate it to ecFlow \"\"\"\n\n def top(self, node):\n node = node.family(\"reload\")\n node.add(\n Label(\"info\", \"from sms to ecFlow\"),\n Defcomplete(),\n Variables(\n URL= \"https://software.ecmwf.int/wiki/display/ECFLOW/Home\",\n ECF_URL_CMD= \"${BROWSER:=firefox} -remote 'openURL(%URL%\"),\n self.main())\n return node\n \n def main(self, node=None): \n fam = Family(\"reload\")\n serial = SerialTask()\n fam.add(\n serial.add(\"get\").add(\n Variables(\n SMS_SUITE= \"eras\",\n # SMS_PROG= 314159, SMS_NODE= \"localhost\",\n # SMS_PROG= 314199, SMS_NODE= \"marhaus\", # eras\n SMS_PROG= 314197, SMS_NODE= \"marhaus\", # eras2\n ),),\n serial.add(\"translate\"),\n serial.add(\"edit\"),\n serial.add(\"mail\"), \n serial.add(\"load\"),\n serial.add(\"bench\").add(Defcomplete()))\n if node is not None: \n return node.add(fam)\n return fam \n\n############################################################################\n\nif __name__ == '__main__':\n import cli_proc, sys\n parser = argparse.ArgumentParser(\n description=DESC,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument(\"--host\", default=\"localhost\",\n help= \"server hostname\")\n parser.add_argument(\"--port\", default=\"3141\",\n help= \"server port number\")\n\n if len(sys.argv) > 1:\n suites = { \"course\": Course(), } \n argv = cli_proc.process(suites, compare=False)\n sys.exit(0)\n args = parser.parse_args()\n\n clt = ec.Client(args.host, args.port)\n try:\n clt.ping()\n except:\n clt = ec.Client(\"localhost\", 1000 + os.geteuid())\n try:\n clt.ping()\n except:\n clt = ec.Client(\"localhost\", 31415)\n try:\n clt.ping()\n except:\n clt = ec.Client(\"localhost\", 3199)\n defs = ec.ecflow.Defs()\n\n course = Course()\n suite = course.suite().add(Reload().main())\n\n if 1: # 
enable/disable remote version of 'consume'\n rem = suite.family(\"remote\").add(\n Task(\"mk_remote\").add(\n Defcomplete(),\n Label(\"info\", \"do not forget to create directory structure\" +\n \"on remote host for job output creation\"),\n ie.onws(host = \"class01\")),\n Variables(ECF_HOME= \"/tmp/%s/ecflow\" % user()),\n ie.onws(host = \"class02\"))\n course.top(rem)\n\n # print clt.stats()\n\n Admin().top(suite)\n\n EcEvents().top(suite)\n\n defs.add_suite(suite)\n \n path = \"/%s\" % user()\n # path = \"/%s/course\" % user()\n # path = \"/%s/admin\" % user()\n # path = \"/%s/ecevents\" % user()\n # print defs\n \n clt.replace(path, defs)\n\n sys.exit (0) \n","sub_path":"Doc/sphinx-examples/cookbook/src/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":18197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"319754443","text":"#Numero = 932588880\n#Correo = Bl4ze89@gmail.com\n\n\n#def Sumar(parametro1, parametro2):\n# return parametro1 + parametro2\n\n\n#print(Sumar(2, 2))\n\n\ndef Multiplicar(num):\n return num*2\nprint (\"Esta es una funcion normal\", str(Multiplicar(2)))\n\n\n\ndoblar = lambda num: num*2\n\nprint(\"Esta es una funcion de orden superior\", str(doblar(2)))\n\nsumar = lambda p1, p2 : p1 + p2\n\nprint(sumar(2, 2))\n\n#Funcion Map\ndef add_five(x):\n return x + 5\n\n#nums = []\nnums = [11, 25, 34, 100, 23]\n\nresult = map(add_five, nums)\nlist()\ntuple()\ndict()\nprint(list(result))\n\n##Funcion Filter\nnums = [11, 25, 34, 100, 23]\n\ndef dividir(x):\n return x % 2 == 0\n\nresult = filter(lambda x: x % 2 == 0, nums)\n\n#result = filter(dividir, nums)\n\nprint(list(result))\n\n##\n\n# reduce\nfrom functools import reduce\n\nnums = [47, 11, 42, 13]\n #58 + 42 = \ndef sumar(x, y):\n return x + y \n\nresult = reduce(lambda x, y: x + y, nums)\n\nresult = reduce(sumar, nums)\n\nprint(result)\n\n#Funcion Compresora\n\nif 2 == 2:\n print (\"Hola Mundo\")\nelse:\n print(\"Hola Luna\")\n\nresult = \"Hola Mundo\" if 2 == 2 else \"Hola Luna\"\n\nlista = []\n\nfor valor in nums:\n lista.append(valor ** 3)\n\ncubos = [valor ** 3 for valor in nums]\n\nprint(list(cubos))\n\n\nfrom functools import reduce\n############# Ej. Tipo #####################\n#1 - Utilizar la función incorporada map() para crear una función que retorne una lista con la longitud de cada palabra (separas por espacios) de una frase. La función recibe una cadena de texto y retornara una lista. # .split()\n\"\"\"lista = []\nwhile len(lista) < 5 :\n variable = str(input(\"Ingresar palabra:\"))\n variable = variable.split(\" \")\n for y in variable:\n lista.append(y)\n\ndef contar_caracteres(x):\n return len(x)\n\nresult = map(contar_caracteres, lista)\n\nprint(list(result)) \"\"\"\n\n#2 - Crear una función que tome una lista de dígitos y devuelva al número al que corresponden. Por ejemplo [1,2,3] corresponde a el numero ciento veintitrés (123). Utilizar la función reduce.\nlista = []\nwhile len(lista) < 5 :\n variable = int(input(\"Ingresar Numero :\"))\n lista.append(variable)\n\nnum = ''\n\ndef concatenar_numeros(x, y):\n num = str(x) + str(y)\n return num\n\nresult = reduce(concatenar_numeros, lista)\n\nprint(result)\n\n\n#3 - Crear una función que retorne las palabras de una lista de palabras que comience con una letra en especifico. Utilizar la función filter.\n\nlista = []\nwhile len(lista) < 5 :\n variable = str(input(\"Ingresar palabra:\"))\n variable = variable.split(\" \")\n for y in variable:\n lista.append(y)\n\ndef palabras(x):\n return x.startswith(\"k\") \n\nresult = filter(palabras, lista)\n\nprint(list(result))\n\n\n# Decorador\n\ndef decorador(func):\n def nueva_funcion(): #C\n print(\"El perro dice :\") #D\n func() #E\n return nueva_funcion #B\n\n#Funcion decorada\n@decorador\ndef saluda(): #F\n print(\"Guau!\") #G\n\nsaluda() #A\n\n#Funcion decorada traducida\ndecorador(saluda())\n\ndef decorador(func):\n def nueva_funcion(parametro1, parametro2):\n print(\"=\" * 12)\n func(parametro1, parametro2)\n print(\"=\" * 12)\n return nueva_funcion\n\n\ndef suma(a, b):\n print(a + b)\n\nsuma_parametro = decorador(suma)\nsuma_parametro(2,4)\n\n@decorador\ndef suma(a, b):\n print(a + b)\n\nsuma(2, 4)\n","sub_path":"Python 3/Clase 2/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"369075876","text":"#! /usr/bin/env python3\n###############################################################################\n# \n# Project: dem_ssara.py\n# Author: Falk Amelung\n# Created: 3/2018\n#\n###############################################################################\n\n\nimport os\nimport sys\nimport glob\nimport time\nimport argparse\nimport warnings\nimport shutil\nimport subprocess\nimport _readfile as readfile\nimport messageRsmas\n\nimport argparse\n\nEXAMPLE = '''example:\n dem_rsmas.py $SAMPLES/GalapagosT128SenVVD.template\n\n uses sentinelStack.boundingBox to generate a dem in DEM folder as dem.py requires integer degrees\n\n options:\n sentinelStack.demMethod = ssara [default: bbox]\n\n subtracts/adds ` 0.5 degree and then rounds to full integer\n\n '-1 0.15 -91.3 -90.9' -- >'-2 1 -92 -90\n\n work for islands where zip files may be missing\n'''\n\n##########################################################################\n\n\ndef create_dem_parser():\n parser = argparse.ArgumentParser(\n description='Implementing SSARA, ISCE, and more? options in DEM.\\nDefault is to run ISCE')\n\n parser.add_argument(\n 'custom_template_file',\n nargs='?',\n help='custom template with option settings.\\n')\n parser.add_argument(\n '--ssara',\n dest='ssara',\n action='store_true',)\n parser.add_argument(\n '--new',\n dest='new',\n action='store_true',\n help='test option')\n\n return parser\n\n\ndef command_line_parse():\n parser = create_dem_parser()\n inps = parser.parse_args()\n inps.isce = True\n\n if inps.ssara:\n inps.isce = False\n return inps\n\n\ndef call_ssara_dem():\n print('you have started ssara!')\n\n print('now you are leaving ssara!')\n\n\ndef call_isce_dem(custom_template):\n\n bbox = custom_template['sentinelStack.boundingBox']\n bbox = bbox.strip(\"'\")\n south = round(float(bbox.split()[0])-0.5) # assumes quotes '-1 0.15 -91.3 -91.0'\n north = round(float(bbox.split()[1])+0.5)\n west = round(float(bbox.split()[2])-0.5)\n east = round(float(bbox.split()[3])+0.5)\n \n dembbox = str(int(south))+' '+str(int(north))+' '+str(int(west))+' '+str(int(east))\n\n # cmd = 'dem.py -a stitch -b '+demBbox+' -c -u https://e4ftl01.cr.usgs.gov/MEASURES/SRTMGL1.003/2000.02.11/'\n cmd = 'dem_rsmas_kawan.py -a stitch -b '+dembbox+' -c -u https://e4ftl01.cr.usgs.gov/MEASURES/SRTMGL1.003/2000.02.11/'\n messageRsmas.log(cmd)\n\n cwd = os.getcwd()\n\n try:\n output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, universal_newlines=True)\n except subprocess.CalledProcessError as exc:\n print(\"Command failed. Exit code, StdErr:\", exc.returncode, exc.output)\n sys.exit('Error produced by dem.py')\n else:\n # print(\"Success. StdOut \\n{}\\n\".format(output))\n if 'Could not create a stitched DEM. Some tiles are missing' in output:\n os.chdir('..')\n shutil.rmtree('DEM')\n sys.exit('Error in dem.py: Tiles are missing. 
Ocean???')\n\n xmlfile = glob.glob('demLat_*.wgs84.xml')[0]\n fin = open(xmlfile, 'r')\n fout = open(\"tmp.txt\", \"wt\")\n for line in fin:\n fout.write(line.replace('demLat', cwd+'/demLat'))\n fin.close()\n fout.close()\n os.rename('tmp.txt', xmlfile)\n\n\ndef main(argv):\n # import pdb; pdb.set_trace()\n\n messageRsmas.log(' '.join(argv))\n inps = command_line_parse()\n # moved below to parse methods\n # parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n # epilog=EXAMPLE)\n # parser.add_argument('custom_template_file', nargs='?',\n # help='custom template with option settings.\\n')\n # inps = parser.parse_args()\n\n custom_template = readfile.read_template(inps.custom_template_file)\n\n # import pdb;\n # pdb.set_trace()\n if os.path.isdir('DEM'):\n shutil.rmtree('DEM')\n os.mkdir('DEM')\n os.chdir('DEM')\n\n # cwd = os.getcwd()\n\n if 'sentinelStack.demMethod' not in custom_template.keys():\n custom_template['sentinelStack.demMethod'] = 'bbox'\n\n if custom_template['sentinelStack.demMethod'] == 'bbox' and inps.ssara:\n call_ssara_dem()\n if inps.new:\n print('nice job kawan! You aren\\' dumb!')\n if custom_template['sentinelStack.demMethod'] == 'bbox' and inps.isce:\n print('you started isce')\n call_isce_dem(custom_template)\n print('you finished isce')\n \n else:\n sys.exit('Error unspported demMethod option: '+custom_template['sentinelStack.demMethod'])\n \n print('\\n###############################################')\n print('End of dem_rsmas.py')\n print('################################################\\n')\n\n###########################################################################################\n\n\nif __name__ == '__main__':\n main(sys.argv[:])\n","sub_path":"dem_rsmas_kawan.py","file_name":"dem_rsmas_kawan.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"595692695","text":"#! /usr/bin/env python3\n\nname = \"GPDVC15\"\n\nimport time\nimport sys\nimport ogameasure\nimport rospy\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Int32\n\nclass GPDVC15_100(object):\n\n def __init__(self):\n\n self.loatt = []\n for i in gpibport_list:\n gpibport = i\n com = ogameasure.gpib_prologix(host, gpibport)\n lo = ogameasure.ELVA1.GPDVC15.GPDVC15_100(com)\n lo.com.close()\n self.loatt.append(lo)\n time.sleep(60)\n\n for i, port in enumerate(gpibport_list):\n topic = \"/dev/gpdvc15_100rs/__IP__/port_%d/i_cmd\"%(port)\n sub = rospy.Subscriber(topic, Float64, self.set_output, callback_args=i)\n time.sleep(60)\n\n\n def set_output(self,q,args):\n lo = self.loatt[args]\n lo.com.open()\n lo.output_set(q.data)\n lo.com.close()\n return\n\n\nif __name__ == \"__main__\" :\n rospy.init_node(name)\n host = rospy.get_param(\"~host\")\n gpibport_list = eval(rospy.get_param(\"~gpibport_list\"))\n att = GPDVC15_100()\n rospy.spin()\n","sub_path":"scripts/GPDVC15_100.py","file_name":"GPDVC15_100.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"635354836","text":"\"\"\"\"Даннный модуль создан для подсчета площади, которое нужно покрыть обоями в комнате и для подсчета обоев\"\"\"\r\n\r\nclass Room:\r\n \"\"\"Класс Room. Для данного класса существуют 3 обязательные переменные: длина, ширина и высота\r\n здесь же происходит и подсчет \"основной\" площади комнаты, без вычета окон и дверей\"\"\"\r\n def __init__(self, width, length, height):\r\n self.width = width\r\n self.length = length\r\n self.height = height\r\n self.square = 2 * self.height * (self.length + self.width)\r\n self.wd = []\r\n \"\"\"\"Данная функция класса Room вводит перменные (ширина и высота) для класса, определяющего окна и двери (WinDoor),\r\n далее вводя их площадь в список\"\"\"\r\n def addWD(self, w, h):\r\n self.wd.append(WinDoor(w, h))\r\n \"\"\"Данная функция класса Room отнимает от общей площади комнаты площадь окон и дверей\"\"\"\r\n def workSurface(self):\r\n self.new_square = self.square\r\n for i in self.wd:\r\n self.new_square -= i.square\r\n return self.new_square\r\n \"\"\"Данная функция класса Room нужна для расчета количества клеемых обоев, вводимые перменные -- ширина и длина\"\"\"\r\n def getWPArea(self):\r\n print(\"Now, let's count amount of wallpapers you need!\")\r\n WPwidth = input('Enter the width of the wallpaper: ')\r\n WPlength = input('Enter the length of the wallpaper: ')\r\n try:\r\n self.WPwidth = int(WPwidth)\r\n self.WPlength = int(WPlength)\r\n except ValueError:\r\n print(\"You didn't enter the number\")\r\n WParea = self.WPlength * self.WPwidth\r\n WPamount = self.new_square/WParea\r\n return WPamount\r\n\r\nclass WinDoor:\r\n \"\"\"Класс WinDoor, расчитывающий длину и ширину окон/дверей, перменные -- ширина и высота\"\"\"\r\n def __init__(self, width, lenght):\r\n self.square = width * lenght\r\n\r\n \"\"\"Функция, в которую ввод��т перменные ширины и высоты для окон и дверей\"\"\"\r\ndef setWinDoorArea(room):\r\n width = input('Enter the width of the door or window: ')\r\n length = input('Enter the length of the door or window: ')\r\n try:\r\n width = float(width)\r\n length = float(length)\r\n except ValueError:\r\n print(\"You didn't enter the number\")\r\n setArea()\r\n room.addWD(width, length)\r\n\r\n \"\"\"Функция-интерфейс, в которой вводятся данные о пространстве команты: ширина, длина, высота\"\"\"\r\ndef setArea():\r\n width = input('Enter the width of the room: ')\r\n length = input('Enter the length of the room: ')\r\n height = input('Enter the height of the room: ')\r\n try:\r\n width = float(width)\r\n length = float(length)\r\n height = float(height)\r\n except ValueError:\r\n print(\"You didn't enter the number\")\r\n setArea()\r\n r1 = Room(width, length, height)\r\n print(\"The square of room is: %.2f\" % (r1.square))\r\n print(\"Okay, let's take off area you don't need to cover, like a window or a door.\")\r\n i = int(input('How many doors and windows do you have in your room? '))\r\n j = 0\r\n while i > j:\r\n setWinDoorArea(r1)\r\n j += 1\r\n area = r1.workSurface()\r\n WP = r1.getWPArea()\r\n print('The workspace of the room is %.2f\\nAmount of wallpapers is: %.2f' % (area, WP))\r\n","sub_path":"composition.py","file_name":"composition.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"200208348","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nimport ftcnn_function\n\n\nclass FtFunction(Function):\n r\"\"\"\n args:\n out_channels : int\n in_channels : int\n stride : (s,s) tuple\n padding : (p,p) tuple\n groups : 1\n \"\"\"\n\n def __init__(self, out_channels, in_channels, stride, padding, dilation, groups):\n super(FtFunction, self).__init__()\n self.out_channels = out_channels\n self.in_channels = in_channels\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.groups = groups\n\n def forward(self, input, kernel, bias):\n self.save_for_backward(input, kernel, bias)\n output = ftcnn_function.forward(input, kernel, bias, self.stride, self.padding,\n self.dilation, self.out_channels, self.in_channels, self.groups)\n return output\n\n def backward(self, grad_output):\n input, kernel, bias = self.saved_variables\n output_mask = [True, True, True]\n grad_input, grad_weight, grad_bias = ftcnn_function.backward(input, grad_output, kernel, bias,\n self.stride, self.padding, self.dilation,\n self.out_channels, self.in_channels,\n self.groups, output_mask)\n return grad_input, grad_weight, grad_bias\n\n\ndef ftFunction(input, kernel, out_channels, in_channels, bias, stride, padding, dilation, groups):\n r\"\"\"\n you dont need to change kernel's shape.Remember to take the `out_channels` and `in_channels` into function.\n Sure, it does not support CPU (CPU is not supported due to backward):>.\n Please use GPU to training the model.\n Some codes in `tfcnn_function.cpp` are useless for you ,I think so.\n args:\n input : (batch,c,h,w)\n kernel : (batch,c,size_h,size_w)\n bias : (out_channels)\n \"\"\"\n return FtFunction(out_channels, in_channels, stride, padding, dilation, groups)(input, kernel, bias)\n","sub_path":"net/ft_function.py","file_name":"ft_function.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"11971587","text":"def find(r, v):\n for i in range(0, len(r)):\n if v > r[i]:\n return i\n return len(r)\n\ndef sort(a):\n result =[]\n while a:\n value = a.pop(0)\n insert_value = find(result, value)\n result.insert(insert_value, value)\n return result\n\n\nd = [2, 4, 5, 1, 3]\nprint(sort(d))\n","sub_path":"풀이/9/연습문제9-1.py","file_name":"연습문제9-1.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"102206873","text":"\"\"\"\nThis module provides the base Transformation class, from which all\ntransformation schemas inherit.\n\"\"\"\n__all__ = ['Transformation', 'register']\n\nfrom typing import Any, Dict, List, Type, Iterator, Callable\nimport logging\n\nfrom ..exceptions import NameInUseException, \\\n UnknownTransformationSchemaException\nfrom ..problem import Problem\nfrom ..snippet import SnippetDatabase\nfrom ..core import Replacement, FileLine\n\nlogger = logging.getLogger(__name__) # type: logging.Logger\nlogger.setLevel(logging.DEBUG)\n\n\"\"\"\nMaintains a registry of transformation schemas indexed by name.\n\"\"\"\nREGISTRY = {} # type: Dict[str, Type[Transformation]]\n\n\nclass Transformation(object):\n \"\"\"\n Represents a transformation to a source code file.\n \"\"\"\n def to_replacement(self, problem: Problem) -> Replacement:\n \"\"\"\n Converts a transformation into a concrete source code replacement.\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def find_schema(name: str) -> 'Type[Transformation]':\n \"\"\"\n Retrieves the transformation schema that is registered under a given\n name.\n\n Raises:\n KeyError: if no schema is found under that name.\n \"\"\"\n return REGISTRY[name]\n\n @staticmethod\n def schemas() -> Iterator[str]:\n \"\"\"\n Returns an iterator over the names of the transformation schemas\n that have been registered.\n \"\"\"\n yield from REGISTRY\n\n @staticmethod\n def from_dict(d: Dict[str, Any]) -> 'Transformation':\n \"\"\"\n Constructs a transformation from a dictionary-based description.\n\n Raises:\n SyntaxError: if the provided description is not well formed.\n UnknownTransformationSchemaException: if the schema used by the\n transformation has not been registered or does not exist.\n\n Returns:\n the transformation that corresponds to the given description.\n \"\"\"\n try:\n kind = d['kind']\n except KeyError:\n msg = \"expected 'kind' property in transformation description\"\n raise SyntaxError(msg)\n\n try:\n schema = REGISTRY[kind]\n except KeyError:\n raise UnknownTransformationSchemaException(kind)\n\n return schema.from_dict(d)\n\n @classmethod\n def all_at_lines(cls,\n problem: Problem,\n snippets: SnippetDatabase,\n lines: List[FileLine],\n *,\n threads: int = 1\n ) -> Dict[FileLine, Iterator['Transformation']]:\n \"\"\"\n Returns a dictionary from lines to streams of all the possible\n transformations of this type that can be performed at that line.\n \"\"\"\n raise NotImplementedError\n\n def to_dict(self) -> Dict[str, Any]:\n d = self._to_dict()\n d['kind'] = self.__class__.NAME # type: ignore\n return d\n\n def _to_dict(self) -> Dict[str, Any]:\n raise NotImplementedError\n\n\ndef register(name: str\n ) -> Callable[[Type[Transformation]], Type[Transformation]]:\n \"\"\"\n Registers a given transformation schema under a provided name.\n\n Raises:\n NameInUseException: if the given name is being used by another\n transformation schema.\n \"\"\"\n def decorator(schema: Type[Transformation]) -> Type[Transformation]:\n logger.debug(\"registering transformation schema [%s] under name [%s]\",\n schema, name)\n global REGISTRY\n if name in REGISTRY:\n raise NameInUseException\n\n # TODO class must implement a \"from_dict\" method\n\n schema.NAME = name # type: ignore\n REGISTRY[name] = schema # type: ignore\n logger.debug(\"registered transformation schema [%s] under name [%s]\",\n schema, name)\n return schema\n\n return 
decorator\n","sub_path":"src/darjeeling/transformation/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"311002847","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport requests\nfrom lxml.etree import HTML\nimport time\nfrom selenium import webdriver\n\nheaders = {\n 'Accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n 'Accept-Encoding': \"gzip, deflate, br\",\n 'Accept-Language': \"zh-CN,zh;q=0.9\",\n 'Cache-Control': \"no-cache\",\n 'Connection': \"keep-alive\",\n 'Content-Length': \"86\",\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'Cookie': \"ASP.NET_SessionId=rkqwkroam01j4lugrzdf1se3;avatarId=d40fc5f1-b49d-46aa-85f9-340ab660951b-;bbsmax_user=62696d4f-e58d-4b75-a277-67dfba6d42bc;CNZZDATA5082706=cnzz_eid%3D284383500-1558691792-%26ntime%3D1558691792;UM_distinctid=16ae9624524f1-00c9ab71224a57-37647e04-1fa400-16ae962452a2cb;qHistory=c2VhcmNocy8r5om56YeP5p+l6K+i;\",\n 'Host': \"icp.chinaz.com\",\n 'Origin': \"https://icp.chinaz.com\",\n 'Pragma': \"no-cache\",\n 'Referer': \"https://icp.chinaz.com/searchs\",\n 'Upgrade-Insecure-Requests': \"1\",\n 'User-Agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36\",\n 'cache-control': \"no-cache\",\n}\n\ndef start(search_list,count):\n for each in search_list:\n with open('已经爬取过的链接.txt','a') as f:\n f.write(each+'\\n')\n\n print('当前条数:'+str(count))\n # print(search_list)\n url = 'https://icp.chinaz.com/searchs'\n urls = '%0D%0A'.join(search_list)\n data = 'urls='+urls+'&btn_search=%E6%9F%A5%E8%AF%A2'\n print(data)\n response = requests.post(url, headers=headers, data=data,timeout=16)\n\n html = HTML(response.text)\n tr_list = html.xpath('//tbody[@id=\"result_table\"]/tr')\n for tr in tr_list:\n searchUrl = tr.xpath('string(.//td[1])')\n resType = tr.xpath('string(.//td[3])')\n if resType != '--':\n print(searchUrl,resType)\n with open(resType+'.txt','a') as f:\n f.write(searchUrl+'\\n')\n\ndef login():\n try:\n driver = webdriver.Chrome()\n url = 'https://icp.chinaz.com/searchs'\n driver.get(url)\n except:\n print('启动谷歌浏览器失败')\n time.sleep(120)\n exit()\n\n print('请登录')\n flag = False\n while True:\n headers['Cookie'] = ''\n cookies = driver.get_cookies()\n cookieName_list = []\n for cookie in cookies:\n\n thisStr = cookie['name'] + '=' +cookie['value'] +';'\n headers['Cookie'] += thisStr\n\n cookieName_list.append(cookie['name'])\n print(headers['Cookie'])\n\n # if cookie['name'] == 'thor':\n # headers['Cookie'] = 'thor=' + cookie['value']\n\n if 'UM_distinctid' in cookieName_list and 'qHistory' in cookieName_list and 'ASP.NET_SessionId' in cookieName_list and 'CNZZDATA5082706' in cookieName_list and 'bbsmax_user' in cookieName_list and 'avatarId' in cookieName_list:\n print('已登录...')\n flag = True\n break\n\n if flag:\n break\n print('未检测到登录cookie。。')\n time.sleep(3)\n\nif __name__ == '__main__':\n\n login()\n\n have_set = set()\n with open('已经爬取过的链接.txt','r') as f:\n results = f.readlines()\n for res in results:\n have_set.add(res.strip())\n\n url_list = []\n with open('网址.txt') as f:\n results = f.readlines()\n for res in results:\n if res not in have_set:\n url_list.append(res.strip())\n print('剩余爬取条数:'+str(len(url_list)))\n\n count = 0\n num = 0\n search_list = []\n for url in url_list:\n count +=1\n if num == 100:\n try:\n start(search_list,count)\n except:\n pass\n num = 0\n search_list = []\n search_list.append(url)\n num +=1\n\n 
start(search_list)","sub_path":"other/chinaz/chinaz.py","file_name":"chinaz.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"309660988","text":"from django.core.management.base import BaseCommand, CommandError\n\nfrom activities import tasks\nfrom activities.models import Activity, Collection, Institution\n\nclass Command(BaseCommand):\n help = 'Generate thumnails for all activity objects'\n\n def handle(self, *args, **options):\n for activity in Activity.objects.all():\n self.stdout.write('Activity \"%s\"... ' % activity.code, ending='')\n tasks.make_thumbnail(activity)\n self.stdout.write('done.')\n for collection in Collection.objects.all():\n self.stdout.write('Collection \"%s\"... ' % collection.slug, ending='')\n tasks.make_thumbnail(collection)\n self.stdout.write('done.')\n for institution in Institution.objects.all():\n self.stdout.write('Institution \"%s\"... ' % institution.slug, ending='')\n tasks.make_thumbnail(institution)\n self.stdout.write('done.')\n\n","sub_path":"activities/management/commands/generate_thumbs.py","file_name":"generate_thumbs.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"382218246","text":"#!/bin/usr/env python\n# -*- coding: utf-8 -*-\nimport sys\n\n\ndef get_parse():\n args = sys.argv\n\n if(len(args) < 4):\n print('usage: $ script.py target_directory [inc|dec] N')\n exit()\n\n targ_dir = args[1]\n inc_type = args[2]\n num = int(args[3])\n\n if not inc_type in ['inc', 'dec']:\n print('Error: inc_type is wrong.')\n exit()\n\n args_dict = {}\n args_dict['targ_dir'] = targ_dir\n args_dict['inc_type'] = inc_type\n args_dict['num'] = num\n\n return args_dict\n\n\ndef main():\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"source/IO.py","file_name":"IO.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"171272585","text":"#!/usr/bin/env python3\n\"\"\"\nFunctions used to fit the models for evaluation.\n\n\"\"\"\nimport pandas\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.metrics import r2_score\nfrom scipy import constants\n\nmatplotlib.rcParams.update({'font.size': 24})\n\nexampleseed = 3232043\n\ndef fit_and_test_models(randomseed):\n #load the data from previous script\n training = pandas.read_csv(\"./training_data.csv\", parse_dates=True, index_col=0)\n test = pandas.read_csv(\"./test_data.csv\", parse_dates=True,index_col=0)\n\n #rescale pressure to atm and remove 1 atm to scale/center it better\n training[\"Air pressure\"] = (training[\"Air pressure\"]*100.0/constants.atm)-1\n test[\"Air pressure\"] = (test[\"Air pressure\"]*100.0/constants.atm)-1\n \n #separate predictors and target variable\n training_x = training.drop(\"T\", axis=1).values\n training_y = training[\"T\"].values\n\n test_x = test.drop(\"T\", axis=1).values\n test_y = test[\"T\"].values \n\n models = {\"Linear\":LinearRegression(), \"Ridge\":Ridge(alpha=0.1), \"Lasso\":Lasso(alpha=0.1),\n \"Neural\":MLPRegressor(hidden_layer_sizes=10, alpha=0.1, early_stopping=True, max_iter=3000, random_state=randomseed),\n \"QuadraticLasso\":make_pipeline(PolynomialFeatures(2), Lasso(alpha=0.1, max_iter=5000))}\n\n prediction_error = pandas.DataFrame(test[\"T\"])\n for label,model in models.items():\n model.fit(training_x, training_y)\n prediction_error[label] = np.abs(test_y - model.predict(test_x))\n \n \n scores = {label:[model.score(test_x, test_y), np.mean(prediction_error[label]), np.max(prediction_error[label])] for label,model in models.items()}\n level_difference = np.abs(test[\"T lower\"] - test[\"T\"])\n scores[\"substitution\"] = [r2_score(test[\"T\"], test[\"T lower\"]), np.mean(level_difference), np.max(level_difference)]\n\n #Testing our hypothesis = \" Model better than linear interpolation for large gaps\"\n #Let's do ten repeats of each gap percentage to get some statistics\n\n error_chances = np.linspace(0.1,1, num=20)\n repeats = 10\n\n comparison_to_interpolation = {i:against_interpolation(models, test, e, prediction_error, repeats) for i,e in enumerate(error_chances)}\n\n return models, prediction_error, scores, comparison_to_interpolation, error_chances\n\n\ndef against_interpolation(models, data, error_chance, prediction_errors, repeats):\n\n #can't remove endpoints, or it is not technically interpolation anymore\n datapoints = data.shape[0]-2\n indices = np.full(datapoints+2, False, dtype=bool)\n \n stats = {\"mean\":np.mean, \"max\":np.max, \"std\":np.std}\n \n errors = {model:{name:np.zeros(repeats) for name in stats} for model in models}\n errors[\"Interpolation\"] = {name:np.zeros(repeats) for name in stats}\n \n for i in range(repeats):\n \n #choose datapoints to keep with chance 1-error_chance \n indices[1:-1] = np.random.choice([True, False], size=datapoints, replace=True, p = [error_chance, 1-error_chance])\n\n fragmented_data = data.copy()\n fragmented_data[indices] = np.nan\n #also slinear should work correctly, not necessarily evenly spaced data\n fragmented_data.interpolate(\"time\", inplace=True)\n\n #calculate l2-distance at indices, and reducing functions\n interpolation_errors = 
np.abs(fragmented_data[\"T\"][indices]-data[\"T\"][indices])\n for m in errors:\n if m == \"Interpolation\":\n for name,method in stats.items():\n errors[m][name][i] = method(interpolation_errors)\n else:\n for name,method in stats.items():\n errors[m][name][i] = method(prediction_errors[m][indices])\n\n\n\n #and since we don't really care about the individual runs, reduce again over the repeats\n #In retrospect, there might be a simpler datastructure and a way to do this with pandas\n reduced_error_stats = {model:{s:{method_name:method(values) for method_name,method in stats.items()} for s,values in d.items()} for model,d in errors.items()}\n\n \n return reduced_error_stats\n\n\ndef make_easily_plottable_form(error_stats,error_rates):\n \"\"\"\n I have to plot the errors from the resulting error data structure.\n So actually I want them to be simple arrays, which requires some reorganisation.\n ...\n If this was something I'd use constatly I would rewrite the error calculation to produce a nicer format directly,\n but since this was to be a one time script, this utility function works as well. It is not very elegant though. \n \"\"\"\n\n model_stats = {model:{name:np.zeros_like(error_rates) for name in [\"max\", \"mean\", \"mean std\", \"max std\"]} for model in error_stats[0]}\n \n for i,e in enumerate(error_rates):\n for model,results in error_stats[i].items():\n model_stats[model][\"max\"][i] = results[\"max\"][\"mean\"]\n model_stats[model][\"max std\"][i] = results[\"max\"][\"std\"]\n model_stats[model][\"mean\"][i] = results[\"mean\"][\"mean\"]\n model_stats[model][\"mean std\"][i] = results[\"mean\"][\"std\"]\n\n\n return model_stats\n\n\ndef plot_interpolation_comparison(error_stats, error_rates):\n model_stats = make_easily_plottable_form(error_stats, error_rates)\n \n #Since most of the linear models have extremely similar behaviour\n #I will only plot lasso, Quadratic Lasso and the Neural network in the comparison.\n\n included_models = [\"Interpolation\", \"Lasso\", \"QuadraticLasso\", \"Neural\"]\n\n fig1, ax1 = plt.subplots()\n fig2, ax2 = plt.subplots()\n for model in included_models:\n ax1.errorbar(error_rates, model_stats[model][\"mean\"],\n yerr=model_stats[model][\"mean std\"],\n marker='x', linestyle='--', capsize=6, label=model)\n ax1.set_title(\"Mean\")\n ax1.set_ylabel(\"K\")\n ax1.set_xlabel(\"Fraction missing\")\n \n ax2.errorbar(error_rates, model_stats[model][\"max\"],\n yerr=model_stats[model][\"max std\"],\n marker='x', linestyle='--', capsize=6, label=model)\n ax2.set_title(\"Max\")\n ax2.set_ylabel(\"K\")\n ax2.set_xlabel(\"Fraction missing\")\n \n \n ax1.set_yscale(\"log\")\n ax1.legend(loc=\"best\")\n ax2.set_yscale(\"log\")\n ax2.legend(loc=\"best\")\n\n plt.show()\n \n","sub_path":"train_and_test_models.py","file_name":"train_and_test_models.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"207609547","text":"# -*- coding: utf-8 -*-\n\nfrom wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\nfrom authomatic import Authomatic\nfrom authomatic.adapters import WebObAdapter\nfrom authomatic.providers import oauth2\n\nCONFIG = {\n \n 'dp': {\n \n 'class_': oauth2.Dataporten,\n \n 'consumer_key': 'client-id-from-dashboard.dataporten.no',\n 'consumer_secret': 'client-secret-from-dashboard.dataporten.no'\n }\n}\n\nsecret = 'ergeresf' # Used for signing session cookies and salting CSRF tokens\nauthomatic = Authomatic(config=CONFIG, secret=secret)\n\n\ndef login(request):\n response = Response()\n result = authomatic.login(WebObAdapter(request, response), 'dp')\n if result:\n # If there is a result, the login procedure is over and we can write to response.\n response.write('Home')\n \n if result.error:\n response.write(u'Login failed: {0}
'.format(result.error.message))\n elif result.user:\n # OAuth 2.0 provides only limited user data on login,\n # We need to update the user to get more info.\n if not (result.user.name and result.user.id):\n result.user.update()\n \n response.write(u'Hi {0}
'.format(result.user.name))\n response.write(u'Your id is: {0}
'.format(result.user.id))\n response.write(u'Your email is: {0}
'.format(result.user.email))\n \n return response\n\n\ndef home(request):\n return Response('''\n Login with Dataporten.
\n ''')\n\n\nif __name__ == '__main__':\n config = Configurator()\n \n config.add_route('home', '/')\n config.add_view(home, route_name='home')\n \n config.add_route('login', '/login')\n config.add_view(login, route_name='login')\n \n app = config.make_wsgi_app()\n server = make_server('127.0.0.1', 8080, app)\n server.serve_forever()\n","sub_path":"examples/pyramid/dataporten/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"2880105","text":"from __future__ import division\r\nimport os\r\nimport time\r\nfrom sklearn.metrics import mean_absolute_error\r\nimport scipy.io as sio\r\nfrom glob import glob\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom six.moves import xrange\r\nimport csv\r\nfrom ops_ import *\r\nfrom utils_ import *\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom math import sqrt\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn import preprocessing\r\nfrom sklearn.model_selection import train_test_split\r\nimport tensorflow_probability as tfp\r\nfrom sklearn.linear_model import LogisticRegression\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import normalize\r\nfrom sklearn.linear_model import Perceptron\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nlabels = np.random.choice([0, 1], size=(400,1))\r\n\r\nbatch_shape = 40\r\n\r\n\r\nclass graph2graph(object):\r\n def __init__(self, sess, test_dir, train_dir, graph_size, output_size, dataset,\r\n batch_size=40, sample_size=40,\r\n gf_dim=10, df_dim=10, L1_lambda=10000, L1_C=100000, additionl=0.001,\r\n input_c_dim=1, output_c_dim=1,\r\n checkpoint_dir=None, sample_dir=None, g_train_num=10, d_train_num=1, c_train_num=2, n_input=1225 * batch_shape,\r\n n_hidden=600 * batch_shape, n_hidden1=24 * batch_shape, n_output=batch_shape, n_regions = 35):\r\n \"\"\"\r\n Args:\r\n sess: TensorFlow session\r\n batch_size: The size of batch. Should be specified before training.\r\n output_size: (optional) The resolution in pixels of the graphs. [256]\r\n gf_dim: (optional) Dimension of gen filters in first conv layer. [64]\r\n df_dim: (optional) Dimension of discrim filters in first conv layer. [64]\r\n input_c_dim: (optional) Dimension of input graph channel. For grayscale input, set to 1. [3]\r\n output_c_dim: (optional) Dimension of output graph channel. For grayscale input, set to 1. 
[3]\r\n \"\"\"\r\n self.sess = sess\r\n self.is_grayscale = (input_c_dim == 1)\r\n self.batch_size = batch_size\r\n self.graph_size = graph_size\r\n self.sample_size = sample_size\r\n self.output_size = output_size\r\n self.g_train_num = g_train_num\r\n self.d_train_num = d_train_num\r\n self.c_train_num = c_train_num\r\n self.test_dir = test_dir\r\n self.train_dir = train_dir\r\n self.gf_dim = gf_dim\r\n self.df_dim = df_dim\r\n self.labels = labels\r\n self.input_c_dim = input_c_dim\r\n self.output_c_dim = output_c_dim\r\n self.dataset = dataset\r\n self.L1_lambda = L1_lambda\r\n self.n_input = n_input\r\n self.n_hidden = n_hidden\r\n self.n_hidden1 = n_hidden1\r\n self.n_output = n_output\r\n self.L1_c = L1_C\r\n self.additional = additionl\r\n self.n_regions = n_regions\r\n self.vectorized_graph = int((self.n_regions * (self.n_regions - 1)) /2) #outputs an upper triangular part of the graph\r\n self.fully_vectorized_graph = self.n_regions*self.n_regions\r\n\r\n # batch normalization\r\n self.d_bn1 = batch_norm(name='d_bn1')\r\n self.d_bn2 = batch_norm(name='d_bn2')\r\n self.d_bn3 = batch_norm(name='d_bn3')\r\n\r\n self.g_bn_e1 = batch_norm(name='g_bn_e1')\r\n self.g_bn_e2 = batch_norm(name='g_bn_e2')\r\n self.g_bn_e3 = batch_norm(name='g_bn_e3')\r\n self.g_bn_e4 = batch_norm(name='g_bn_e4')\r\n\r\n ##########################################\r\n\r\n self.g_bn_e11 = batch_norm(name='l_bn_e11')\r\n self.g_bn_e22 = batch_norm(name='l_bn_e22')\r\n self.g_bn_e33 = batch_norm(name='l_bn_e33')\r\n self.g_bn_e44 = batch_norm(name='l_bn_e44')\r\n\r\n #########################################\"\r\n\r\n self.g_bn_d1 = batch_norm(name='g_bn_d1')\r\n self.g_bn_d2 = batch_norm(name='g_bn_d2')\r\n self.g_bn_d3 = batch_norm(name='g_bn_d3')\r\n\r\n self.checkpoint_dir = checkpoint_dir\r\n self.build_model()\r\n\r\n def build_model(self):\r\n\r\n n_input = 1225\r\n n_hidden = 1000\r\n n_hidden1 = 100\r\n n_output = 1\r\n\r\n self.X = tf.placeholder(tf.float32)\r\n self.Y = tf.placeholder(tf.float32)\r\n # Weights\r\n self.W1 = tf.Variable(tf.random_uniform([n_input, n_hidden], -1.0, 1.0), name='c_w1')\r\n self.W2 = tf.Variable(tf.random_uniform([n_hidden, n_hidden1], -1.0, 1.0), name='c_w2')\r\n self.W3 = tf.Variable(tf.random_uniform([n_hidden1, n_output], -1.0, 1.0), name='c_w3')\r\n # Bias\r\n self.b1 = tf.Variable(tf.zeros([n_hidden]), name='c_b1')\r\n self.b2 = tf.Variable(tf.zeros([n_hidden1]), name='c_b2')\r\n self.b3 = tf.Variable(tf.zeros([n_output]), name='c_b2')\r\n\r\n self.real_data = tf.placeholder(tf.float32,\r\n [self.batch_size, self.graph_size[0], self.graph_size[1],\r\n self.input_c_dim + self.output_c_dim],\r\n name='real_A_and_B_graphs')\r\n\r\n self.real_A = self.real_data[:, :, :, :self.input_c_dim] # takes the first real graph\r\n self.real_B = self.real_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim] # takes the target real graph\r\n self.fake_B = self.generator(self.real_A)\r\n self.latent = self.lat(self.real_A)\r\n self.upper_vector_A = self.upper_triangular_extractor(self.real_A)\r\n self.upper_vector_B = self.upper_triangular_extractor(self.fake_B)\r\n self.multiplex = tf.concat([self.upper_vector_A, self.latent, self.upper_vector_B], 1)\r\n self.real_AB = tf.concat([self.real_A, self.real_B], 3)\r\n self.fake_AB = tf.concat([self.real_A, self.fake_B], 3)\r\n self.D, self.D_logits = self.discriminator(self.real_AB, reuse=False)\r\n self.D_, self.D_logits_ = self.discriminator(self.fake_AB, reuse=True)\r\n self.hy = self.classification_arch(self.X, 
self.Y)\r\n self.d_sum = tf.summary.histogram(\"d\", self.D)\r\n self.d__sum = tf.summary.histogram(\"d_\", self.D_)\r\n self.fake_B_sum = tf.summary.histogram(\"fake_B\", self.fake_B)\r\n self.latent_sum = tf.summary.histogram(\"latent\", self.latent)\r\n self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits, labels=tf.ones_like(self.D)))\r\n self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.zeros_like(\r\n self.D_)))\r\n self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.ones_like(self.D_))) \\\r\n + self.L1_lambda * tf.reduce_mean(\r\n tf.abs(self.real_AB - self.fake_AB))\r\n\r\n self.classifier_loss = tf.reduce_mean(-self.Y * tf.log(tf.maximum(self.hy, 1e-9)) - (1 - self.Y) * tf.log(tf.maximum(1 - self.hy, 1e-9))) \\\r\n * self.L1_c + self.additional\r\n\r\n self.l1_regularizer = tf.contrib.layers.l1_regularizer( scale=0.005, scope=None)\r\n weights = tf.trainable_variables()\r\n self.regularization_penalty = tf.contrib.layers.apply_regularization(self.l1_regularizer, weights)\r\n self.d_loss_real_sum = tf.summary.scalar(\"d_loss_real\", self.d_loss_real)\r\n self.d_loss_fake_sum = tf.summary.scalar(\"d_loss_fake\", self.d_loss_fake)\r\n self.d_loss = self.d_loss_real + self.d_loss_fake\r\n self.g_loss_sum = tf.summary.scalar(\"g_loss\", self.g_loss)\r\n self.d_loss_sum = tf.summary.scalar(\"d_loss\", self.d_loss)\r\n self.l_sum = tf.summary.scalar(\"l_loss\", self.latent)\r\n self.classifier_loss_sum = tf.summary.scalar(\"c_loss\", self.classifier_loss)\r\n\r\n t_vars = tf.trainable_variables()\r\n self.d_vars = [var for var in t_vars if 'd_' in var.name]\r\n self.g_vars = [var for var in t_vars if 'g_' in var.name]\r\n self.l_vars = [var for var in t_vars if 'l_' in var.name]\r\n self.c_vars = [var for var in t_vars if 'c_' in var.name]\r\n\r\n self.saver = tf.train.Saver()\r\n\r\n def load_random_samples(self, sample_dir):\r\n sample_data = load_data(sample_dir)\r\n sample = np.random.choice(sample_data, self.batch_size)\r\n sample_graphs = np.array(sample).astype(np.float32)\r\n return sample_graphs\r\n\r\n def sample_model(self, sample_dir, epoch, idx):\r\n sample_graphs = self.load_random_samples(sample_dir)\r\n samples, d_loss, g_loss = self.sess.run(\r\n [self.fake_B_sample, self.d_loss, self.g_loss],\r\n feed_dict={self.real_data: sample_graphs}\r\n )\r\n\r\n print(\"[Sample] d_loss: {:.8f}, g_loss: {:.8f}\".format(d_loss, g_loss))\r\n\r\n def demo(self, args):\r\n second_view = np.zeros((1, self.n_regions, self.n_regions, 1))\r\n tab = np.zeros((1))\r\n \"\"\"Train pix2pix\"\"\"\r\n d_optim = tf.train.AdamOptimizer(args.lr_d, beta1=args.beta1) \\\r\n .minimize(self.d_loss, var_list=self.d_vars) # minimizing the discriminator's loss using Adam optimizer.\r\n\r\n g_optim = tf.train.AdamOptimizer(args.lr_g, beta1=args.beta1) \\\r\n .minimize(self.g_loss, var_list=self.g_vars) # minimizing the generator's loss using Adam optimizer.\r\n\r\n c_optim = tf.train.AdamOptimizer(args.lr_c, beta1=args.beta1) \\\r\n .minimize(self.classifier_loss, var_list=self.c_vars) # minimizing the generator's loss using Adam optimizer.\r\n\r\n init_op = tf.global_variables_initializer() # initialize the variable.\r\n self.sess.run(init_op) # running the initializer.\r\n self.g_sum = tf.summary.merge([self.d__sum,\r\n self.fake_B_sum, self.d_loss_fake_sum, self.g_loss_sum,\r\n self.classifier_loss_sum])\r\n self.d_sum = tf.summary.merge([self.d_sum, 
self.d_loss_real_sum, self.d_loss_sum])\r\n self.writer = tf.summary.FileWriter(\"./logs\", self.sess.graph)\r\n\r\n counter = 1\r\n start_time = time.time()\r\n data = load_data(args.train_dir, 'train', self.graph_size[0], self.dataset) # load the train data.\r\n Kf = KFold(n_splits=10)\r\n second_counter = 0\r\n\r\n for X_index, Y_index in Kf.split(data):\r\n X_train = data[X_index]\r\n X_test = data[Y_index]\r\n Y_train = labels[X_index]\r\n Y_test = labels[Y_index]\r\n errD_fake = 0\r\n errD_real = 0\r\n best = 4500\r\n errC = 0\r\n best_dis = 2\r\n\r\n\r\n # load testing input\r\n print(\"Loading testing graphs ...\")\r\n sample_graphs_all = X_test\r\n batch_idxs = min(len(sample_graphs_all), args.train_size) // self.batch_size\r\n if self.load(self.checkpoint_dir):\r\n print(\" [*] Load SUCCESS\")\r\n else:\r\n print(\" [!] Load failed...\")\r\n\r\n for idxx in range(0, batch_idxs):\r\n sample_graphs = sample_graphs_all[idxx * self.batch_size:(idxx + 1) * self.batch_size]\r\n sample_graphs = np.array(sample_graphs)\r\n view1_test = sample_graphs[:, :, :, 0:1]\r\n print(\"sampling graph \", idxx)\r\n samples = self.sess.run(\r\n self.fake_B,\r\n feed_dict={self.real_data: sample_graphs})\r\n second_view = np.concatenate((second_view, samples), axis=0)\r\n np.save('second_view', second_view)\r\n\r\n multi_test = self.sess.run([self.latent], feed_dict={self.real_data: sample_graphs})\r\n multi_test = np.reshape(multi_test, (batch_shape, self.n_regions))\r\n\r\n v1_upper_test = self.sess.run([self.upper_vector_A], feed_dict={self.real_A: view1_test})\r\n v1_upper_test = np.reshape(v1_upper_test, (batch_shape, self.vectorized_graph))\r\n\r\n view2_test = np.reshape(samples, (batch_shape, self.n_regions, self.n_regions, 1))\r\n v2_upper_test = self.sess.run([self.upper_vector_B], feed_dict={self.fake_B: view2_test})\r\n v2_upper_test = np.reshape(v2_upper_test, (batch_shape, self.vectorized_graph))\r\n multiplex_test = np.concatenate((v1_upper_test, multi_test, v2_upper_test), axis=1)\r\n\r\n multiplex_final = np.zeros((1, self.fully_vectorized_graph))\r\n if batch_shape > 1:\r\n for k in range(batch_shape):\r\n multiplex_full = multiplex_test[k:k + 1, :]\r\n\r\n multiplex_final = np.concatenate((multiplex_final, multiplex_full), axis=1)\r\n\r\n multiplex_final = multiplex_final[:, self.fully_vectorized_graph:(batch_shape + 1) * self.fully_vectorized_graph]\r\n\r\n for m in range(batch_shape):\r\n multiplex_per_sub = multiplex_final[:, m * self.fully_vectorized_graph:(m + 1) * self.fully_vectorized_graph]\r\n if m == 0:\r\n joined_multiplex_per_sub = multiplex_per_sub\r\n else:\r\n joined_multiplex_per_sub = np.concatenate((joined_multiplex_per_sub, multiplex_per_sub), axis=0)\r\n\r\n if idxx == 0:\r\n multiplex_per_batch = joined_multiplex_per_sub\r\n else:\r\n multiplex_per_batch = np.concatenate((multiplex_per_batch, joined_multiplex_per_sub), axis=0)\r\n\r\n if second_counter == 0:\r\n joined_multiplex_per_batch = multiplex_per_batch\r\n y = Y_test\r\n else:\r\n joined_multiplex_per_batch = np.concatenate((joined_multiplex_per_batch, multiplex_per_batch), axis=0)\r\n y = np.concatenate((y, Y_test), axis=0)\r\n np.save('final_second_view.npy', joined_multiplex_per_batch)\r\n second_counter = second_counter + 1\r\n\r\n\r\n def discriminator(self, graph, y=None, reuse=False):\r\n with tf.variable_scope(\"discriminator\") as scope:\r\n if reuse:\r\n tf.get_variable_scope().reuse_variables()\r\n else:\r\n assert tf.get_variable_scope().reuse == False\r\n h0 = lrelu(e2e(graph, self.df_dim, 
k_h=self.graph_size[0], name='d_h0_conv'))\r\n h1 = lrelu(self.d_bn1(e2e(h0, self.df_dim * 2, k_h=self.graph_size[0], name='d_h1_conv')))\r\n h2 = lrelu(self.d_bn2(e2n(h1, self.df_dim * 2, k_h=self.graph_size[0], name='d_h2_conv')))\r\n h3 = lrelu(self.d_bn3(n2g(h2, self.df_dim * 2, k_h=self.graph_size[0], name='d_h3_conv')))\r\n h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')\r\n return tf.nn.sigmoid(h4), h4\r\n\r\n def generator(self, graph, y=None):\r\n with tf.variable_scope(\"generator\") as scope:\r\n\r\n e1 = self.g_bn_e1(e2e(lrelu(graph), self.gf_dim, k_h=self.graph_size[0], name='g_e1_conv'))\r\n\r\n e2 = self.g_bn_e2(e2e(lrelu(e1), self.gf_dim * 2, k_h=self.graph_size[0], name='g_e2_conv'))\r\n e2_ = tf.nn.dropout(e2, keep_prob=1)\r\n\r\n e3 = self.g_bn_e3(e2n(lrelu(e2_), self.gf_dim * 2, k_h=self.graph_size[0], name='g_e3_conv'))\r\n\r\n self.d2, self.d2_w, self.d2_b = de_e2n(tf.nn.relu(e3),\r\n [self.batch_size, self.graph_size[0], self.graph_size[0], self.gf_dim * 2], k_h=self.graph_size[0],\r\n name='g_d2', with_w=True)\r\n d2 = tf.nn.dropout(self.g_bn_d2(self.d2), keep_prob=1)\r\n d2 = tf.concat([d2, e2], 3)\r\n\r\n self.d3, self.d3_w, self.d3_b = de_e2e(tf.nn.relu(d2),\r\n [self.batch_size, self.graph_size[0], self.graph_size[0], int(self.gf_dim)],\r\n k_h=self.graph_size[0], name='g_d3', with_w=True)\r\n d3 = self.g_bn_d3(self.d3)\r\n d3 = tf.concat([d3, e1], 3)\r\n\r\n self.d4, self.d4_w, self.d4_b = de_e2e(tf.nn.relu(d3),\r\n [self.batch_size, self.graph_size[0], self.graph_size[0], self.output_c_dim],\r\n k_h=self.graph_size[0], name='g_d4', with_w=True)\r\n\r\n return tf.add(tf.nn.relu(self.d4), graph)\r\n\r\n def upper_triangular_extractor(self, graph, y=None, reuse=True):\r\n reshaped_output_final = np.zeros((1, self.vectorized_graph))\r\n if batch_shape > 1:\r\n for i in range(batch_shape):\r\n output_graph = graph[i:i + 1, :, :, :]\r\n output_graph = tf.reshape(output_graph, [self.n_regions, self.n_regions])\r\n output = upper_triang(output_graph)\r\n reshaped_output = tf.reshape(output, [1, self.vectorized_graph])\r\n reshaped_output_final = tf.concat([reshaped_output_final, reshaped_output], 0)\r\n reshaped_output_final = reshaped_output_final[1:batch_shape + 1, :]\r\n return reshaped_output_final\r\n\r\n def lat(self, graph, y=None):\r\n with tf.variable_scope(\"lat\") as scope:\r\n e1 = self.g_bn_e11(e2e(lrelu(graph), self.gf_dim, k_h=self.graph_size[0], name='l_e1'))\r\n e2 = self.g_bn_e22(e2e(lrelu(e1), self.gf_dim * 2, k_h=self.graph_size[0], name='l_e2'))\r\n e2_ = tf.nn.dropout(e2, keep_prob=1)\r\n e3 = self.g_bn_e33(e2n(lrelu(e2_), self.gf_dim * 2, k_h=self.graph_size[0], name='l_e3'))\r\n latent_space = np.zeros((batch_shape, self.n_regions))\r\n for i in range(20):\r\n extracted_latent = e3[:, :, :, i:i + 1]\r\n extracted_latent = tf.reshape(extracted_latent, [batch_shape, self.n_regions])\r\n latent_space = latent_space + extracted_latent\r\n\r\n return latent_space\r\n\r\n def save(self, checkpoint_dir, step):\r\n model_name = \"g2g.model\"\r\n model_dir = \"%s\" % ('flu')\r\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n os.makedirs(checkpoint_dir)\r\n\r\n self.saver.save(self.sess,\r\n os.path.join(checkpoint_dir, model_name),\r\n global_step=step)\r\n\r\n def load(self, checkpoint_dir):\r\n print(\" [*] Reading checkpoint...\")\r\n\r\n model_dir = \"%s\" % ('flu')\r\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\r\n\r\n ckpt = 
tf.train.get_checkpoint_state(checkpoint_dir)\r\n if ckpt and ckpt.model_checkpoint_path:\r\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\r\n self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))\r\n return True\r\n else:\r\n return False\r\n\r\n def classification_arch(self, x_data, y_data):\r\n # Dataset\r\n x_data = tf.cast(x_data, tf.float32)\r\n self.L2 = tf.sigmoid(tf.matmul(x_data, self.W1) + self.b1)\r\n self.L3 = tf.sigmoid(tf.matmul(self.L2, self.W2) + self.b2)\r\n classification_arch_output = tf.sigmoid(tf.matmul(self.L3, self.W3) + self.b3)\r\n return classification_arch_output\r\n\r\n def classi(self, input_hy, X, Y, epoch):\r\n epochs = epoch\r\n lr = 0.01\r\n display_step = 100\r\n\r\n x_data = X\r\n y_data = Y\r\n hy = input_hy\r\n X = tf.placeholder(tf.float32)\r\n Y = tf.placeholder(tf.float32)\r\n\r\n cost = tf.reduce_mean(-Y * tf.log(hy) - (1 - Y) * tf.log(1 - hy))\r\n optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost)\r\n\r\n init = tf.global_variables_initializer()\r\n tab = np.zeros(1)\r\n with tf.Session() as sess:\r\n sess.run(init)\r\n for step in range(epochs):\r\n _, c = sess.run([optimizer, cost], feed_dict={X: x_data, Y: y_data})\r\n if step % display_step == 0:\r\n print(\"Cost: \", c)\r\n if step % 2:\r\n errg = np.array(c)\r\n errg = np.reshape(errg, (1))\r\n tab = np.concatenate((tab, errg), axis=0)\r\n answer = tf.equal(tf.floor(hy + 0.1), Y)\r\n accuracy = tf.reduce_mean(tf.cast(answer, \"float\"))\r\n print(sess.run([hy], feed_dict={X: x_data, Y: y_data}))\r\n accuracy_evaluation = accuracy.eval({X: x_data, Y: y_data})\r\n print(\"Accuracy: \", accuracy.eval({X: x_data, Y: y_data}))\r\n return accuracy_evaluation\r\n\r\n","sub_path":"model_demo.py","file_name":"model_demo.py","file_ext":"py","file_size_in_byte":20528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"272033276","text":"\r\nimport pip\r\nimport pika\r\nimport csv\r\n\r\nrecieve_host = '192.168.56.1'\r\nqueue_name = 'hello'\r\n\r\ndef install(package):\r\n pip.main(['install', package])\r\n\r\nconnection = pika.BlockingConnection(\r\n pika.ConnectionParameters(host=recieve_host))\r\nchannel = connection.channel()\r\nchannel.queue_declare(queue=queue_name)\r\n\r\nwith open('main1.csv', newline='') as File: \r\n reader = csv.reader(File)\r\n for row in reader:\r\n channel.basic_publish(exchange = '', routing_key = 'hello', body = row)\r\n print(\" [x] Sent\" + ' ' + row)\r\n\r\nconnection.close()\r\n\r\n","sub_path":"send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"411762889","text":"#!/usr/bin/env python\nimport distsim\n\nword_to_ccdict = distsim.load_contexts(\"nytcounts.4k\")\n\n\n### provide your answer below\n\n\n###Answer examples; replace with your choices\nfor i, (word, score) in enumerate(distsim.show_nearest(word_to_ccdict, word_to_ccdict['jack'],set(['jack']),distsim.cossim_sparse), start=1):\n print(\"{}: {} ({})\".format(i, word, score))\n","sub_path":"voc/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"226518476","text":"# ####################################### LOAD REQUIRED LIBRARIES ############################################# #\nimport time\nimport gdal\nimport ogr, osr\nfrom gdalconst import *\nimport struct\nimport csv\nimport baumiTools as bt\nfrom tqdm import tqdm\n# ####################################### SET TIME-COUNT ###################################################### #\nstarttime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"--------------------------------------------------------\")\nprint(\"Starting process, time:\" + starttime)\nprint(\"\")\n# ####################################### HARD-CODED FOLDERS AND FILES ######################################## #\nshape = bt.baumiVT.CopyToMem(\"L:/_SHARED_DATA/HB_MB/Layers/RandomPoints_CenterFP_20170714.shp\")\nout_file = \"L:/_SHARED_DATA/HB_MB/Layers/RandomPoints_CenterFP_20170714_values.csv\"\n# Landsat_Folder\nL_folder = \"L:/_SHARED_DATA/HB_MB/Landsat/\"\n# ####################################### FUNCTIONS ########################################################### #\ndef ExtracRasterToPoint(lyrRef, feat, rasPath):\n # Open raster-file, get datatype\n ds = gdal.Open(rasPath, GA_ReadOnly)\n pr = ds.GetProjection()\n gt = ds.GetGeoTransform()\n rb = ds.GetRasterBand(1)\n rasdType = bt.baumiRT.GetDataTypeHexaDec(rb.DataType)\n # Create coordinate transformation for point\n source_SR = lyrRef.GetSpatialRef()\n target_SR = osr.SpatialReference()\n target_SR.ImportFromWkt(pr)\n coordTrans = osr.CoordinateTransformation(source_SR, target_SR)\n # Get the coordinates of the point\n geom = feat.GetGeometryRef()\n geom_cl = geom.Clone()\n geom_cl.Transform(coordTrans)\n mx, my = geom_cl.GetX(), geom_cl.GetY()\n # Extract raster value\n px = int((mx - gt[0]) / gt[1])\n py = int((my - gt[3]) / gt[5])\n structVar = rb.ReadRaster(px, py, 1, 1)\n Val = struct.unpack(rasdType, structVar)[0]\n return Val\n# ####################################### PROCESSING ########################################################## #\n# Initialize output\nvalueList = []\nheader = [\"Point_ID\",\"Scene_ID\",\"IndexName\",\"Value\"]\nvalueList.append(header)\n# Open the shapefile and build coordinate transformation\nprint(\"Open shp-file\")\nlyr = shape.GetLayer()\n# Loop through each feature and extract the values\nfeat = lyr.GetNextFeature()\nwhile feat:\n id = feat.GetField(\"UniqueID\")\n print(\"Processing Point-ID \", id)\n # Loop through folders in L_folder\n sceneList = bt.baumiFM.GetFilesInFolderWithEnding(L_folder,\"\",fullPath=True)\n for sc in tqdm(sceneList):\n # Get the product-ID from the xml-File\n prodID = bt.baumiFM.GetFilesInFolderWithEnding(sc,\".xml\",fullPath=False)\n prodID = prodID[:-4]\n # Now get the information at the rasters\n # (1) Pixel_Qa\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"pixel_qa.tif\", fullPath = True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"pixelQA\", rasval]\n except:\n valList = [id, prodID, \"pixelQA\", \"NaN\"]\n valueList.append(valList)\n # (2) Sensor_azimuth_band4\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_sensor_azimuth_band4.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"sensor_azimuth_b4\", rasval]\n except:\n valList = [id, prodID, \"sensor_azimuth_b4\", \"NaN\"]\n valueList.append(valList)\n # (3) Sensor_zenith_band4\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_sensor_zenith_band4.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, 
feat, img)\n valList = [id, prodID, \"sensor_zenith_b4\", rasval]\n except:\n valList = [id, prodID, \"sensor_zenith_b4\", \"NaN\"]\n valueList.append(valList)\n # (4) Solar_azimuth_band4\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_solar_azimuth_band4.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"solar_azimuth_b4\", rasval]\n except:\n valList = [id, prodID, \"solar_azimuth_b4\", \"NaN\"]\n valueList.append(valList)\n # (5) Solar_zenith_band4\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_solar_zenith_band4.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"solar_zenith_b4\", rasval]\n except:\n valList = [id, prodID, \"solar_zenith_b4\", \"NaN\"]\n valueList.append(valList)\n # (6) EVI\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_sr_evi.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"sr_evi\", rasval]\n except:\n valList = [id, prodID, \"sr_evi\", \"NaN\"]\n valueList.append(valList)\n # (7) Solar_zenith_band4\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_sr_msavi.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"sr_msavi\", rasval]\n except:\n valList = [id, prodID, \"sr_msavi\", \"NaN\"]\n valueList.append(valList)\n # (8) Solar_zenith_band4\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_sr_nbr2.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"sr_nbr2\", rasval]\n except:\n valList = [id, prodID, \"sr_nbr2\", \"NaN\"]\n valueList.append(valList)\n # (9) Solar_zenith_band4\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_sr_ndmi.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"sr_ndmi\", rasval]\n except:\n valList = [id, prodID, \"sr_ndmi\", \"NaN\"]\n valueList.append(valList)\n # (10) Solar_zenith_band4\n img = bt.baumiFM.GetFilesInFolderWithEnding(sc, \"_sr_savi.tif\", fullPath=True)\n try:\n rasval = ExtracRasterToPoint(lyr, feat, img)\n valList = [id, prodID, \"sr_savi\", rasval]\n except:\n valList = [id, prodID, \"sr_savi\", \"NaN\"]\n valueList.append(valList)\n# Take next point\n feat = lyr.GetNextFeature()\nlyr.ResetReading()\n# Write the output-file\nprint(\"Write output\")\nwith open(out_file, \"w\") as theFile:\n csv.register_dialect(\"custom\", delimiter=\",\", skipinitialspace=True, lineterminator='\\n')\n writer = csv.writer(theFile, dialect=\"custom\")\n for element in valueList:\n writer.writerow(element)\n# ####################################### END TIME-COUNT AND PRINT TIME STATS################################## #\nprint(\"\")\nendtime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"--------------------------------------------------------\")\nprint(\"--------------------------------------------------------\")\nprint(\"start: \" + starttime)\nprint(\"end: \" + endtime)\nprint(\"\")","sub_path":"GIS_Subsetting_PointIntersecting_Sampling/_deprecated/2017-08-01_IRAN-HADI_PointIntersect_ExtractIndicesFromUSGS-DL.py","file_name":"2017-08-01_IRAN-HADI_PointIntersect_ExtractIndicesFromUSGS-DL.py","file_ext":"py","file_size_in_byte":7063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"406345989","text":"#!/usr/bin/python3.8\nimport os\nimport sys\nsys.path.append('/scratch/ppcode')\nsys.path.append('/scratch/ppcode/standard')\nsys.path.append('/scratch/ppcode/standard/palm_std')\nimport numpy as np\nimport palm_data_ext\nfrom palm_data_ext import *\nimport matplotlib.pyplot as plt\n\n\nprjDir = '/scratch/palmdata/JOBS'\n\n\njobName_0 = 'sigma_ref'\njobDir_0 = prjDir + '/' + jobName_0\ntSeq_0, zSeq_0, wutotSeq_0 = velo_pr_palm(jobDir_0, jobName_0, ['.000'], 'wu')\n#tSeq_0, zSeq_0, wursvSeq_0 = velo_pr_palm(jobDir_0, jobName_0, ['.000'], 'w*u*')\n#tSeq_0, zSeq_0, wusgsSeq_0 = velo_pr_palm(jobDir_0, jobName_0, ['.000'], 'w\"u\"')\n#tSeq_0, zSeq_0, wvtotSeq_0 = velo_pr_palm(jobDir_0, jobName_0, ['.000'], 'wv')\n#ustar_0 = np.power(np.power(wutotSeq_0[-1,0],2) + np.power(wvtotSeq_0[-1,0],2),0.25)\n\n\njobName_1 = 'sigma_test'\njobDir_1 = prjDir + '/' + jobName_1\ntSeq_1, zSeq_1, wutotSeq_1 = velo_pr_palm(jobDir_1, jobName_1, ['.000'], 'wu')\n#tSeq_1, zSeq_1, wvtotSeq_1 = velo_pr_palm(jobDir_1, jobName_1, ['.000'], 'wv')\n#ustar_1 = np.power(np.power(wutotSeq_1[-1,0],2) + np.power(wvtotSeq_1[-1,0],2),0.25)\n\n\n\"\"\" u profile of stationary flow (last time step) \"\"\"\nfig, ax = plt.subplots(figsize=(4.5,6))\ntInd = -1\nplt.plot(wutotSeq_0[tInd], zSeq_0/zSeq_0[-1], label='ref-tot', linewidth=1.0, marker='', linestyle='-', color='k')\n#plt.plot(wursvSeq_0[tInd], zSeq_0/zSeq_0[-1], label='rsv', linewidth=1.0, marker='', linestyle='--', color='r')\n#plt.plot(wusgsSeq_0[tInd], zSeq_0/zSeq_0[-1], label='sgs', linewidth=1.0, marker='', linestyle=':', color='b')\n#plt.plot(wursvSeq_0[tInd]+wusgsSeq_0[tInd], zSeq_0/zSeq_0[-1], label='rsv+sgs', linewidth=1.0, marker='o', linestyle='', color='k')\nplt.plot(wutotSeq_1[tInd], zSeq_1/zSeq_1[-1], label='test-tot', linewidth=1.0, marker='', linestyle='-', color='r')\nplt.xlabel(r\"momentum flux $(\\mathrm{m^2/s^2})$\", fontsize=12)\nplt.ylabel('z/H', fontsize=12)\nxaxis_min = -0.1\nxaxis_max = 0.02\nxaxis_d = 0.02\nyaxis_min = 0.0\nyaxis_max = 1.0\nyaxis_d = 0.1\nplt.ylim(yaxis_min - 0.0*yaxis_d,yaxis_max)\nplt.xlim(xaxis_min - 0.0*xaxis_d,xaxis_max)\nplt.xticks(list(np.linspace(xaxis_min, xaxis_max, int((xaxis_max-xaxis_min)/xaxis_d)+1)), fontsize=12)\nplt.yticks(list(np.linspace(yaxis_min, yaxis_max, int((yaxis_max-yaxis_min)/yaxis_d)+1)), fontsize=12)\nplt.legend(bbox_to_anchor=(0.05,0.86), loc=6, borderaxespad=0, fontsize=12) # (1.05,0.5) is the relative position of legend to the origin, loc is the reference point of the legend\nplt.grid()\nplt.title('')\nfig.tight_layout() # adjust the layout\nsaveName = 'flux' + '_pr.png'\nsaveDir = '/scratch/prjdata/sigma_imp'\n#plt.savefig(saveDir + '/' + saveName)\nplt.show()\nplt.close()","sub_path":"standard/palm/plotting/flux_pr.py","file_name":"flux_pr.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"280221532","text":"import json\nimport os\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom django.template import Template, Context\nfrom django.template.loader_tags import BlockNode\nfrom django.utils._os import safe_join\n\n\ndef get_page_or_404(name):\n \"\"\"Return page content as a Django template or raise 404 error.\n\n Given a page name, try get the absolute file path. If ValueError exception or in case\n file path doesn't exist raise a 404 error. If exists open each file and instantiates a new Django template object\n with its contents. Loop through the page's raw nodelist and checks for BlockNode with the name context. If BlockNode\n is found, it defines a metavariable for us that contains that content.\n\n :param name: the page name\n :raise ValueError: raises an exception\n :return: render as a template the page content inside page folder\n \"\"\"\n try:\n file_path = safe_join(settings.SITE_PAGES_DIRECTORY, name) # absolute file path\n except ValueError: # exceptional outcome\n raise Http404('Page Not Found')\n else: # not exceptional outcome\n if not os.path.exists(file_path):\n raise Http404('Page Not found')\n\n with open(file_path, 'r') as f:\n page = Template(f.read()) # instatiates a new \n\n meta = None\n for i, node in enumerate(list(page.nodelist)):\n if isinstance(node, BlockNode) and node.name == 'context':\n meta = page.nodelist.pop(i)\n break\n page._meta = meta\n return page\n\n\ndef page(request, slug='index'):\n \"\"\"Render the requested page if found.\n\n Given a slug generate the slug.html string. Try to obtain the corresponding template calling get_page_or_404() view.\n If page metavariable is not None, turn JSON into Python data structure (dict) and update context dict.\n\n :param request:\n :param slug: refers to static html pages inside page folder (default = index)\n :return: passes the page and slug context to be rendered by the page.html layout template\n \"\"\"\n file_name = '{}.html'.format(slug)\n page = get_page_or_404(file_name)\n context = {\n 'slug': slug, # \n 'page': page, # \n }\n if page._meta is not None:\n meta = page._meta.render(Context())\n extra_context = json.loads(meta)\n context.update(extra_context)\n # context['page'] will render the Template by {% include var %} template tag\n return render(request, 'page.html', context)\n","sub_path":"sitebuilder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"522323888","text":"#!/usr/bin/env python3\nfrom remi import start\nfrom threading import Timer\nimport typer\nfrom . import api, remi_ui\n\n\ncli = typer.Typer()\n\n@cli.command()\ndef main(\n\t\taddress : str = \"localhost\", # network interface ip\n\t\tport : int = 8001, # http listen port\n\t\tapi_base : str = \"https://brzeczyszczykiewicz.pvv.ntnu.no/api\", #Link to where your Grzegorz API is hosted\n\n\t\thost_name : str = None, # a string containing the host name or remote ip address that allows to access to your app.\n\t\twebsocket_port : int = 0, # websocket port, 0 makes it random\n\n\t\t# In order to limit the remote access to your interface you\n\t\t# can define a username and password. It probably uses http basic-auth\n\t\tusername : str = None,\n\t\tpassword : str = None,\n\n\t\t# Open a PyWebView window instead of using the browser. This requires pywebview to be installed.\n\t\t# This will negate all other options\n\t\tstandalone : bool = False,\n\n\t\tstart_browser : bool = False, # Defines whether the browser should be opened automatically at startup\n\t\tmultiple_instance : bool = False, # Multipe instance. If True, multiple clients that connects to your script has different App instances\n\t\tenable_file_cache : bool = True, # Cache files in \"res\" folder\n\n\t\t# set to false to force the volume to be zero.\n\t\t# Great for remote development!\n\t\tvolume : bool = True,\n\t\t):\n\n\tif not volume:\n\t\tprint(\"Keeping volume down\")\n\t\tdef keep_volume_down():\n\t\t\tapi.set_volume(0)\n\t\t\tTimer(5, keep_volume_down).start()\n\t\tTimer(5, keep_volume_down).start()\n\n\n\tapi.set_endpoint(api_base)\n\n\t# start the webserver:\n\n\tif standalone: # it's picky :(\n\t\tstart(\n\t\t\tremi_ui.RemiApp,\n\t\t\ttitle = \"Gregorz\",\n\t\t\tstandalone = standalone\n\t\t)\n\telse:\n\t\tstart(\n\t\t\tremi_ui.RemiApp,\n\t\t\ttitle = \"Gregorz\",\n\t\t\taddress = address,\n\t\t\tport = port,\n\t\t\thost_name = host_name,\n\t\t\twebsocket_port = websocket_port,\n\t\t\tusername = username,\n\t\t\tpassword = password,\n\t\t\tstandalone = standalone,\n\t\t\tstart_browser = start_browser,\n\t\t\tmultiple_instance = multiple_instance,\n\t\t\tenable_file_cache = enable_file_cache,\n\n\t\t)\n\n\nif __name__ == \"__main__\":\n\tcli()\n","sub_path":"grzegorz_clients/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"334285128","text":"#!/usr/local/bin/python3.6\r\nimport os\r\nimport sys\r\nimport traceback\r\n\r\nfrom pubsub import pub\r\nimport wx\r\nfrom wx.lib.dialogs import ScrolledMessageDialog\r\nfrom wx.lib.agw.hyperlink import HyperLinkCtrl\r\n\r\nfrom pyxenoverse.bac import BAC\r\nfrom pyxenoverse.gui import create_backup\r\nfrom yabac.panels.main import MainPanel\r\nfrom yabac.panels.side import SidePanel\r\nfrom yabac.dlg.find import FindDialog\r\nfrom yabac.dlg.replace import ReplaceDialog\r\n\r\n\r\n\r\nVERSION = '0.4.6'\r\n\r\n\r\n\r\nclass MainWindow(wx.Frame):\r\n def __init__(self, parent, title, dirname, filename):\r\n sys.excepthook = self.exception_hook\r\n self.dirname = ''\r\n self.locale = wx.Locale(wx.LANGUAGE_ENGLISH)\r\n\r\n # A \"-1\" in the size parameter instructs wxWidgets to use the default size.\r\n # In this case, we select 200px width and the default height.\r\n wx.Frame.__init__(self, parent, title=title, size=(1300, 900))\r\n self.statusbar = self.CreateStatusBar() # A Statusbar in the bottom of the window\r\n\r\n # Panels\r\n # TODO: Get actual panels for these\r\n self.main_panel = MainPanel(self)\r\n self.entry_panel = SidePanel(self)\r\n\r\n # Setting up the menu.\r\n file_menu= wx.Menu()\r\n file_menu.Append(wx.ID_OPEN)\r\n file_menu.Append(wx.ID_SAVE)\r\n file_menu.Append(wx.ID_CONVERT, \"Convert for Skill Creator\")\r\n file_menu.Append(wx.ID_ABOUT)\r\n file_menu.Append(wx.ID_EXIT)\r\n\r\n edit_menu = wx.Menu()\r\n edit_menu.Append(wx.ID_FIND)\r\n edit_menu.Append(wx.ID_REPLACE)\r\n\r\n # Creating the menu bar.\r\n menu_bar = wx.MenuBar()\r\n menu_bar.Append(file_menu, \"&File\")\r\n menu_bar.Append(edit_menu, \"&Edit\")\r\n self.SetMenuBar(menu_bar)\r\n\r\n # Publisher\r\n pub.subscribe(self.open_bac, 'open_bac')\r\n pub.subscribe(self.load_bac, 'load_bac')\r\n pub.subscribe(self.save_bac, 'save_bac')\r\n pub.subscribe(self.set_status_bar, 'set_status_bar')\r\n\r\n # Events.\r\n self.Bind(wx.EVT_MENU, self.open_bac, id=wx.ID_OPEN)\r\n self.Bind(wx.EVT_MENU, self.save_bac, id=wx.ID_SAVE)\r\n self.Bind(wx.EVT_MENU, self.on_find, id=wx.ID_FIND)\r\n self.Bind(wx.EVT_MENU, self.on_replace, id=wx.ID_REPLACE)\r\n self.Bind(wx.EVT_MENU, self.on_convert, id=wx.ID_CONVERT)\r\n self.Bind(wx.EVT_MENU, self.on_about, id=wx.ID_ABOUT)\r\n self.Bind(wx.EVT_MENU, self.on_exit, id=wx.ID_EXIT)\r\n self.Bind(wx.EVT_BUTTON, self.open_bac, id=wx.ID_OPEN)\r\n self.Bind(wx.EVT_BUTTON, self.save_bac, id=wx.ID_SAVE)\r\n accelerator_table = wx.AcceleratorTable([\r\n (wx.ACCEL_CTRL, ord('o'), wx.ID_OPEN),\r\n (wx.ACCEL_CTRL, ord('s'), wx.ID_SAVE),\r\n (wx.ACCEL_CTRL, ord('f'), wx.ID_FIND),\r\n (wx.ACCEL_CTRL, ord('h'), wx.ID_REPLACE),\r\n ])\r\n self.SetAcceleratorTable(accelerator_table)\r\n\r\n # Name\r\n self.name = wx.StaticText(self, -1, '(No file loaded)')\r\n font = wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)\r\n self.name.SetFont(font)\r\n\r\n # Buttons\r\n open_button = wx.Button(self, wx.ID_OPEN, \"Load\")\r\n save_button = wx.Button(self, wx.ID_SAVE, \"Save\")\r\n self.hidden = wx.CheckBox(self, -1, 'Hide Empty Entries')\r\n self.hidden.SetValue(True)\r\n self.hidden.Bind(wx.EVT_CHECKBOX, self.on_check)\r\n\r\n hyperlink = HyperLinkCtrl(self, -1, \"What do all these things mean?\",\r\n URL=\"https://docs.google.com/document/d/\"\r\n \"18gaAbNCeJyTgizz5IvvXzjWcH9K5Q1wvUHTeWnp8M-E/edit#heading=h.v77lp7pp65pd\")\r\n\r\n # Sizer\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n button_sizer = wx.BoxSizer()\r\n button_sizer.Add(open_button)\r\n 
button_sizer.AddSpacer(10)\r\n button_sizer.Add(save_button)\r\n button_sizer.Add(self.hidden, 0, wx.ALL, 10)\r\n button_sizer.Add(hyperlink, 0, wx.ALL, 10)\r\n\r\n panel_sizer = wx.BoxSizer()\r\n panel_sizer.Add(self.main_panel, 1, wx.ALL | wx.EXPAND)\r\n panel_sizer.Add(self.entry_panel, 2, wx.ALL | wx.EXPAND)\r\n\r\n sizer.Add(self.name, 0, wx.CENTER)\r\n sizer.Add(button_sizer, 0, wx.ALL, 10)\r\n sizer.Add(panel_sizer, 1, wx.ALL | wx.EXPAND)\r\n\r\n self.SetBackgroundColour('white')\r\n self.SetSizer(sizer)\r\n self.SetAutoLayout(1)\r\n\r\n # Lists\r\n self.entry_list = self.main_panel.entry_list\r\n\r\n # Dialogs\r\n self.find = FindDialog(self, self.entry_list, -1)\r\n self.replace = ReplaceDialog(self, self.entry_list, -1)\r\n\r\n sizer.Layout()\r\n self.Show()\r\n\r\n if filename:\r\n self.load_bac(dirname, filename)\r\n\r\n def exception_hook(self, etype, value, trace):\r\n dlg = ScrolledMessageDialog(self, ''.join(traceback.format_exception(etype, value, trace)), \"Error\")\r\n dlg.ShowModal()\r\n dlg.Destroy()\r\n\r\n def on_about(self, _):\r\n # Create a message dialog box\r\n dlg = wx.MessageDialog(self, f\" Yet another BAC Organizer v{VERSION} by Kyonko Yuuki\",\r\n \"About YaBAC Organizer\", wx.OK)\r\n dlg.ShowModal() # Shows it\r\n dlg.Destroy() # finally destroy it when finished.\r\n\r\n def on_exit(self, _):\r\n self.Disable()\r\n self.Close(True) # Close the frame.\r\n\r\n def open_bac(self, _):\r\n dlg = wx.FileDialog(self, \"Choose a file\", self.dirname, \"\", \"*.bac\", wx.FD_OPEN)\r\n if dlg.ShowModal() == wx.ID_OK:\r\n self.load_bac(dlg.GetDirectory(), dlg.GetFilename())\r\n dlg.Destroy()\r\n\r\n def load_bac(self, dirname, filename):\r\n self.dirname = dirname\r\n path = os.path.join(self.dirname, filename)\r\n self.statusbar.SetStatusText(\"Loading...\")\r\n new_bac = BAC()\r\n if not new_bac.load(path):\r\n dlg = wx.MessageDialog(self, f\"{filename} is not a valid BAC\", \"Warning\")\r\n dlg.ShowModal()\r\n dlg.Destroy()\r\n return\r\n self.main_panel.bac = new_bac\r\n self.main_panel.bac.loadComment(path)\r\n self.main_panel.build_tree()\r\n pub.sendMessage('hide_panels')\r\n self.name.SetLabel(filename)\r\n self.main_panel.Layout()\r\n self.statusbar.SetStatusText(f\"Loaded {path}\")\r\n\r\n def save_bac(self, _):\r\n if self.main_panel.bac is None:\r\n dlg = wx.MessageDialog(self, \" No BAC Loaded\", \"Warning\", wx.OK)\r\n dlg.ShowModal() # Shows it\r\n dlg.Destroy() # finally destroy it when finished.\r\n return\r\n\r\n dlg = wx.FileDialog(self, \"Save as...\", self.dirname, \"\", \"*.bac\", wx.FD_SAVE)\r\n if dlg.ShowModal() == wx.ID_OK:\r\n filename = dlg.GetFilename()\r\n self.dirname = dlg.GetDirectory()\r\n self.statusbar.SetStatusText(\"Saving...\")\r\n create_backup(self.dirname, filename)\r\n path = os.path.join(self.dirname, filename)\r\n self.main_panel.bac.save(path)\r\n self.main_panel.bac.saveComment(path)\r\n self.statusbar.SetStatusText(f\"Saved {path}\")\r\n saved = wx.MessageDialog(self, f\"Saved to {path} successfully\", \"BAC Saved\")\r\n saved.ShowModal()\r\n saved.Destroy()\r\n dlg.Destroy()\r\n\r\n def on_check(self, _):\r\n self.main_panel.build_tree()\r\n pub.sendMessage('hide_panels')\r\n\r\n def on_convert(self, _):\r\n pub.sendMessage('convert_for_skill_creator')\r\n\r\n def on_find(self, _):\r\n if not self.replace.IsShown():\r\n self.find.Show()\r\n\r\n def on_replace(self, _):\r\n if not self.find.IsShown():\r\n self.replace.Show()\r\n\r\n def set_status_bar(self, text):\r\n self.statusbar.SetStatusText(text)\r\n\r\n\r\nif 
__name__ == '__main__':\r\n app = wx.App(False)\r\n dirname = filename = None\r\n if len(sys.argv) > 1:\r\n dirname, filename = os.path.split(sys.argv[1])\r\n frame = MainWindow(None, f\"YaBAC Organizer v{VERSION}\", dirname, filename)\r\n app.MainLoop()\r\n","sub_path":"YaBAC Organizer.py","file_name":"YaBAC Organizer.py","file_ext":"py","file_size_in_byte":8090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"234035267","text":"def sumDigits(x):\n\ttotal = 0\n\twhile x > 0:\n\t\tones = x % 10\n\t\ttotal += ones\n\t\tx = (x - ones) / 10\n\treturn total\n\ndef factorial(x):\n\tif x == 1:\n\t\treturn x\n\telse:\n\t\treturn x * factorial(x-1)\n\nprint(sumDigits(factorial(100)))\n","sub_path":"python/20eu.py","file_name":"20eu.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"92767557","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 4 21:12:59 2018\n\n@author: ypbehere\n\"\"\"\n\nfrom time import sleep\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome()\nwebs = []\nfor i in range(1, 5):\n driver.get('https://event.hktdc.com/fair/hktoyfair-tc/Event-Exhibitor-List/%E9%A6%99%E6%B8%AF%E8%B2%BF%E7%99%BC%E5%B1%80%E9%A6%99%E6%B8%AF%E7%8E%A9%E5%85%B7%E5%B1%95/?page='+str(i)+'&pageItem=500&view=list')\n sleep(20)\n total = 501 if i < 4 else 171\n for j in range(1, total):\n list = driver.find_element_by_xpath('//div[@id=\"item_lists\"]/div[%d]/div[2]/div[1]/a[1]' % j)\n href = list.get_attribute('href')\n webs.append(href)\ndriver.close()\nwith open('webs.txt', 'w') as f:\n for web in webs:\n f.write(web+'\\n')","sub_path":"crawler/worn.py","file_name":"worn.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"503916197","text":"from dataclasses import field\n\nfrom wecs.core import Component, System, UID, and_filter\n\n\n# Rooms, and being in a room\n@Component()\nclass Room:\n # Neighboring room entities\n adjacent: list = field(default_factory=list)\n # Entities (thought to be) in the room\n presences: list = field(default_factory=list)\n # Presence entered the room\n arrived: list = field(default_factory=list)\n # Presence continues to be present\n continued: list = field(default_factory=list)\n # Presences that left the room\n gone: list = field(default_factory=list)\n\n\n@Component()\nclass RoomPresence:\n room: UID\n # Entities perceived\n presences: list = field(default_factory=list)\n\n\n@Component()\nclass ChangeRoomAction:\n room: UID # Room to change to\n\n\nclass EntityNotInARoom(Exception): pass\n\n\nclass ItemNotInARoom(Exception): pass\n\n\nclass RoomsNotAdjacent(Exception): pass\n\n\ndef is_in_room(item, entity, throw_exc=False):\n if not entity.has_component(RoomPresence):\n if throw_exc:\n raise EntityNotInARoom\n else:\n return False\n presence = entity.get_component(RoomPresence)\n\n if item._uid not in presence.presences:\n if throw_exc:\n raise ItemNotInARoom\n else:\n return False\n\n return True\n\n\nclass PerceiveRoom(System):\n entity_filters = {\n 'room': and_filter([Room]),\n 'presences': and_filter([RoomPresence]),\n }\n\n def update(self, filtered_entities):\n # Clean the bookkeeping lists\n for entity in filtered_entities['room']:\n room = entity.get_component(Room)\n room.arrived = []\n room.continued = []\n room.gone = []\n # New arrivals to rooms, and continued presences\n for entity in filtered_entities['presences']:\n room_uid = entity.get_component(RoomPresence).room\n room_entity = self.world.get_entity(room_uid)\n room = room_entity.get_component(Room)\n if entity._uid not in room.presences:\n room.arrived.append(entity._uid)\n else:\n room.continued.append(entity._uid)\n # Checking who is gone\n for entity in filtered_entities['room']:\n room = entity.get_component(Room)\n for presence in room.presences:\n if presence not in room.continued:\n room.gone.append(presence)\n # Rebuilding the presence lists\n for entity in filtered_entities['room']:\n room = entity.get_component(Room)\n room.presences = room.arrived + room.continued\n # Let the presences perceive the presences in the room\n for entity in filtered_entities['presences']:\n presence = entity.get_component(RoomPresence)\n room_entity = self.world.get_entity(presence.room)\n room = room_entity.get_component(Room)\n presence.presences = room.presences\n\n\nclass ChangeRoom(System):\n entity_filters = {\n 'act': and_filter([ChangeRoomAction, RoomPresence])\n }\n\n def update(self, filtered_entities):\n for entity in filtered_entities['act']:\n room = self.world.get_entity(\n entity.get_component(RoomPresence).room,\n )\n target = entity.get_component(ChangeRoomAction).room\n\n if target not in room.get_component(Room).adjacent:\n if self.throw_exc:\n raise RoomsNotAdjacent\n else:\n entity.get_component(RoomPresence).room = target\n\n entity.remove_component(ChangeRoomAction)\n","sub_path":"wecs/rooms.py","file_name":"rooms.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"208339037","text":"\nimport time\nimport random\n\n\ndef averageCase(algo, sample):\n\n\tprint(\"calculating average with sample of 100 000\")\n\ttimes = []\n\tfor i in range(sample):\n\t\ttimes.append(timer(algo, random.sample(xrange(100000), 100000)))\n\treturn sum(times)/len(times)\n\n\ndef timer(algo, list):\n\tt0 = time.clock()\n\talgo(list)\n\treturn time.clock() - t0\n\n\ndef properties(algo):\n\t# measure process time\n\ttime = 0\n\ttime = timer(algo, range(1,100000))\n\tprint(\"worst case: input size 100 000, time \", time)\n\tprint(\"average case: input size \", 100000, \"time \", averageCase(algo, 200))\n\n\ndef measure(algo, list):\n\tt0 = time.clock()\n\talgo(list)\n\tprint(\"size \", len(list), \" time \", time.clock() - t0)\n\treturn time.clock() - t0\n\n \t\n\n","sub_path":"Protoyping/Python/algotest.py","file_name":"algotest.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"11371645","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.cm as cm\nimport random\nimport seaborn as sns\n\n# x = regiones (codigo_region)\n# y = promedio (avg_grades)\n\ndf = pd.read_csv(r\"D:\\\\Documentos_U\\\\2020-1\\\\Patos\\\\Proyecto\\\\chilean-student-performance\\\\out\\\\alumnosvsdocentes\\\\comunasmetropolitana\\\\results-alumnosvsdocentes-comunas.csv\", sep = ';')\n\n\n\n#df.plot() # plots all columns against index\n#df.plot(kind='scatter',x='codigo_region',y='avg_grades', ) # scatter plot\n#df.plot(kind='density') # estimate density function\n\nco = [(1, 0.5, 0),\n (0.85, 0.19, 0.23),\n (0.15, 0.23, 0.73),\n (0.13, 0.53, 0.21),\n (0.5, 0.3, 0.9)]\n\n\ncat_1 = df[df['year']==2013]\n\n\ncat_2 = df[df['year']==2014]\n\n\ncat_3 = df[df['year']==2015]\n\n\ncat_4 = df[df['year']==2016]\n\n\ncat_5 = df[df['year']==2017]\n\n\ncat_6 = df[df['year']==2018]\n\n\n#sns.set(style=\"darkgrid\")\n\n\n\n\n\n\nlabels =['Santiago','Cerrillos','Cerro Navia','Conchali','El Bosque','E. Central','Huechuraba','Indepen.','La Cisterna','La Florida','La Granja','La Pintana','La Reina', 'Las Condes','Lo Barnechea','Lo Espejo','Lo Prado','Macul','Maipú','Ñuñoa','P.A.C','Peñalolen','Providencia','Pudahuel','Quilicura','Q. Normal','Recoleta','Renca', 'San Joaquín','San Miguel','San Ramon','Vitacura']\n\nx = []\nfig, ax = plt.subplots()\n\nx = np.arange(32)\n\n\n\n\n\nwidth = 0.2\n\nm=df[df['year']==2013]\n\n#ax.bar(x - width/2, df[df['year']==2017]['avg_grades_alumnos'], color=([255/255, 109/255, 197/255]),width= width)\n# set x-axis label\n\nm.avg_grades_alumnos.plot(kind='bar', color=([255/255, 109/255, 197/255]), ax=ax, width=width, position=1)\nax.set_xlabel(\"Comunas\",fontsize=14)\n# set y-axis label\nax.set_ylabel(\"Promedio Alumnos\",color=([255/255, 109/255, 197/255]),fontsize=14)\nax.set_ylim([1, 7])\n\n\n# twin object for two different y-axis on the sample plot\nax2=ax.twinx()\n# make a plot with different y-axis using second axis object\n#ax2.bar(x + width/2, df[df['year']==2017]['avg_grades_docentes'],color=([84/255, 88/255, 255/255]),width= width)\nm.avg_grades_docentes.plot(kind='bar', color=([84/255, 88/255, 255/255]), ax=ax2, width=width, position=0)\nax2.set_ylabel(\"Promedio Docentes\",color=([84/255, 88/255, 255/255]),fontsize=14)\nax2.set_ylim([1, 4])\nplt.xlabel(\"Comunas\")\n\nplt.xlim([-1,32])\nax.set_xticks(x)\nax.set_xticklabels(labels)\nplt.title(\"Rendimiento alumnos y docentes por comunas de Santiago (2013)\")\n\nplt.show()\n# save the plot as a file\n\n\"\"\"\n\nX = df[df['year']==2017]['region']\ncolor=(random.random(), random.random(), random.random(), 1.0)\n#X = X + np.random.normal(size=X.shape)*0.07\nY = df[df['year']==2017]['avg_grades_alumnos']\n#Y = Y + np.random.normal(size=X.shape)*0.03\n\n\n\nX2 = df[df['year']==2017]['region']\n#X = X + np.random.normal(size=X.shape)*0.07\nY2 = df[df['year']==2017]['avg_grades_docentes']\n#Y = Y + np.random.normal(size=X.shape)*0.03\nplt.bar(X, Y,secondary_y= Y2, alpha=0.5)\n\n\nplt.xlabel(\"Regiones\")\nplt.ylabel(\"Promedios\")\nplt.title(\"Rendimientos alumnos y docentes por region (2017)\")\nplt.legend(['Alumnos','Regiones','III','IV','V','VI','VII','VIII','IX','X','XI','XIII','RM', 'XIV','XV'\n ], framealpha=1)\n\n\nplt.show()\n\"\"\"","sub_path":"src/main/python/alumnosvsdocentes/barplot_comunas.py","file_name":"barplot_comunas.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"37865161","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 3 17:58:05 2019\n\n@author: Jian Zhou\n\"\"\"\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.nn.functional import upsample\n\nclass NET(nn.Module):\n def __init__(self):\n super(NET, self).__init__()\n #self.conv1 = nn.Conv2d(1, 4, 3)\n # 1 input image channel, 4 output channels, 3x3 square convolution\n self.conv_layer1 = self.double_conv(1,32)\n self.pool1 = nn.MaxPool3d((2,2,2), stride=(2,2,2)) # Check whether it is subsampled by 2 ?\n # Set for D_in = 388 \n #self.conv_layer2 = self.double_conv(32,64)\n #self.pool2 = nn.MaxPool3d((2,2,2), stride=(2,2,2))\n # Set for D_in = 388\n self.conv_layer2 = self.double_conv(32,64)\n self.upsample = nn.Upsample(scale_factor=2, mode='trilinear')\n # nearest neighbor and linear, bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor, respectively.\n # Check whether it is upsampled by 2\n self.dconv_up2 = self.double_conv(64+32, 32)\n #self.dconv_up1 = self.double_conv(64+32, 32)\n self.out = nn.Conv3d(32, 2, (30,1,1)) \n # projection to 2 probabilities for 2 labers: on the circle or outside circle. 1 square convolution for H and W\n # 80 for Depth dimension. Outsizes:[1,2,1,96,104]\n\n############# \n #self.keep=self.conv_layer1.weight.data # which I added \n #self.keep=self.conv_layer1.parameters()\n #w = list(self.conv_layer1.parameters())\n #print('conv_laryer1 weights are', w)\n #W=np.array(w)\n #print('Shape of W is ', W.shape,' Type of W is ', type(W))\n #print('Type of W[1] is ', type(W[1]))# \n #print('Max of Conv1 weights is', max(w), 'Min of Conv1 weights is ', min(w))\n\n #w_1=list(self.out.parameters())\n #W_1=np.array(w_1)\n #print('Shape of W_1 is ', W_1.shape,' Type of W is ', type(W_1))\n #print(' Max of W_1 is ', np.amax(W_1[1,:]))\n #print('Max of out weights is', max(w_1),'Min of Conv1 weights is ', min(w_1))\n##############\n \n def forward(self, x): # U-Net\n imsize = x.size()[2:]\n # Check what's the meaning of this ?? \n # 3DCNN input: [N, C_in, Dep_in, Hei_in, Wid_in]\n # output: [N, C_out, Dep_out, Hei_out, Wid_out] \n conv1 = self.conv_layer1(x) # x: [batch_size, In_channels, Height, Width]\n #x= self.reduce()\n x = self.pool1(conv1)\n #conv2 = self.conv_layer2(x)\n #x = self.pool2(conv2)\n x = self.conv_layer2(x)\n \n x = self.upsample(x) \n x = torch.cat([x, conv1], dim=1) # concatenate, reuse feature map in different steps\n x = self.dconv_up2(x)\n #x = self.upsample(x)\n #x = torch.cat([x, conv1], dim=1)\n # concatenate, reuse feature map from the output of convolution layers in different steps\n #x = self.dconv_up1(x)\n x = self.out(x)\n \n # which I added to test whether the weight has changed\n #if (self.conv_layer1.weight.data==self.keep).all(): print('same!')\n #w_1 = list(self.conv_layer1.parameters())\n \n #w_1=list(self.out.parameters())\n #print('out weights are', w_1)\n #if (self.conv_layer1.parameters()==self.keep).all(): print('same!')\n #else: print('Weights are', w_1) \n \n return x\n \n def double_conv(self, in_channels, out_channels):\n return nn.Sequential(\n nn.Conv3d(in_channels, out_channels, (1,3,3), padding=(0,1,1)), # Filter size is 1 means no filter. 
\n            nn.BatchNorm3d(out_channels), # out_channels is the number of feature maps\n            nn.ReLU(inplace=True), # inplace=True overwrites its input tensor to save memory\n            nn.Conv3d(out_channels, out_channels, (1,3,3), padding=(0,1,1)),\n            nn.BatchNorm3d(out_channels),\n        )","sub_path":"Model3DTest.py","file_name":"Model3DTest.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"495054160","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 16 23:16:16 2017\r\n\r\n@author: Sandesh\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets(\"MNIST_data/\",one_hot=True)\r\n\r\nnumClasses=10\r\ninputSize=784\r\ntrainingIterations=20000\r\nbatchSize=64\r\ntf.reset_default_graph()\r\n\r\nX = tf.placeholder(tf.float32,shape=[None,inputSize])\r\ny = tf.placeholder(tf.float32,shape=[None,numClasses])\r\n\r\nW1 = tf.Variable(tf.random_normal([inputSize,numClasses],stddev=0.1))\r\nB1 = tf.Variable(tf.constant(0.1), [numClasses])\r\n\r\ny_pred = tf.nn.softmax(tf.matmul(X,W1)+B1)\r\n\r\nloss = tf.reduce_mean(tf.square(y-y_pred))\r\nopt = tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(loss)\r\n\r\ncorrect_prediction = tf.equal(tf.arg_max(y_pred,1), tf.arg_max(y,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,\"float\"))\r\n\r\nsess = tf.Session()\r\ninit = tf.global_variables_initializer()\r\nsess.run(init)\r\n\r\nfor i in range(trainingIterations):\r\n batch = mnist.train.next_batch(batchSize)\r\n batchInput = batch[0]\r\n batchLabels = batch[1]\r\n _, trainingLoss = sess.run([opt,loss], feed_dict={X:batchInput,\r\n y:batchLabels})\r\n if i%1000 == 0:\r\n train_accuracy = accuracy.eval(session=sess, feed_dict={X: batchInput, y: batchLabels})\r\n print (\"step %d, training accuracy %g\"%(i, train_accuracy))\r\n\r\nbatch = mnist.test.next_batch(batchSize)\r\ntestAccuracy = sess.run(accuracy, feed_dict={X: batch[0], y: batch[1]})\r\nprint (\"test accuracy %g\"%(testAccuracy))","sub_path":"LogisticRegressionWithTensorFlow.py","file_name":"LogisticRegressionWithTensorFlow.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"107198583","text":"\"\"\"\ndb2.py\n=========\n\nThis module provides DB2 interface.\n\n:copyright: (c) 2014 by Asif Jalil.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import print_function\nimport logging\nimport pprint\nimport ibm_db\nimport re\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\ndef _date_handler(o):\n return o.isoformat() if hasattr(o, 'isoformat') else 0\n\nclass DB2(object):\n \n #app = current_app._get_current_object()\n\n def __init__(self, app):\n self._conn = None\n self.app = app._get_current_object()\n\n def __enter__(self):\n try:\n self._conn = ibm_db.pconnect(self.app.config['DBNAME']\n , self.app.config['DBUSER']\n , self.app.config['DBPW'])\n except:\n log.error(\"Database connection failed.\")\n log.error(ibm_db.conn_errormsg())\n raise\n else:\n log.debug(\"Connected to {dbname} user {dbuser} using ****\".format(dbname = self.app.config['DBNAME']\n , dbuser = self.app.config['DBUSER']))\n \n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb): \n if self._conn:\n ibm_db.close(self._conn) \n log.debug(\"Closed {dbname} connection.\".format(dbname = self.app.config['DBNAME']))\n\n\n def _create_table(self, ddl):\n result = ibm_db.exec_immediate(self._conn, ddl)\n\n def query_db(self, query, args=()):\n \"\"\"Submits database query.\n \n Examples:\n \n for user in query_db('select * from users'):\n print user['username'], 'has the id', user['user_id']\n \n for user in query_db('select * from users where username = ?', [the_username]):\n print user['username'], 'has the id', user['user_id']\n\n Returns list\n list = list of rows, where each row is represented using tuple\n \"\"\"\n rows = []\n if self._conn:\n log.debug(\"Running query\\n\" + query)\n log.debug(\"Query params: \" + pprint.pformat(args))\n stmt = ibm_db.prepare(self._conn, query)\n \n for i, param in enumerate(args):\n ibm_db.bind_param(stmt, i, param)\n \n ibm_db.execute(stmt)\n if re.search('create|insert|update|delete', query, re.I):\n return rows\n\n row = ibm_db.fetch_tuple(stmt)\n while (row):\n rows.append(row)\n row = ibm_db.fetch_tuple(stmt)\n\n return rows\n\n\n def _check_table(self, tabschema, tabname):\n \"\"\" Checks if a table is defined.\n Returns True if defined, False otherwise.\n \"\"\"\n cols = []\n rows = []\n log.debug(\"Checking if {tabschema}.{tabname} exist\".format(tabschema = tabschema, tabname = tabname))\n try:\n self.query_db(\"select 1 from {tabschema}.{tabname}\".format(tabschema = tabschema, tabname = tabname))\n log.debug(\"Exist.\")\n return True\n except:\n if 'is an undefined name' in ibm_db.stmt_errormsg(): \n return False\n else:\n raise\n\n def snapappl(self, member = None):\n \"\"\"Returns DB2 application snapshot.\n \"\"\"\n cols = []\n rows = []\n dgt_schema = \"session\"\n dgt_name = \"snapappl\"\n dgt_ddl = \"\"\"\n declare global temporary table session.snapappl (\n SNAPSHOT_TIMESTAMP TIMESTAMP\n , MEMBER SMALLINT\n , HANDLE BIGINT\n , APPL_ID VARCHAR(128)\n , SEQUENCE_NO VARCHAR(4)\n , AUTH_ID VARCHAR(128)\n , APPSTATE VARCHAR(22)\n , PROG_NAME VARCHAR(256)\n , CPU_S DEC(31,11)\n , RR BIGINT\n , RW BIGINT\n , TOTAL_CPU_S DOUBLE\n ) on commit preserve rows\n not logged\n\n \"\"\"\n\n snapappl_delete = \"\"\"\n delete from {dgt_schema}.{dgt_name}\n where snapshot_timestamp not in ( select max(snapshot_timestamp)\n from {dgt_schema}.{dgt_name})\n \"\"\".format(dgt_schema = dgt_schema, dgt_name = dgt_name)\n\n snapappl_insert = \"\"\"\n insert into {dgt_schema}.{dgt_name}\n 
with total_cpu as (\n select member, cast (sum(\n agent_usr_cpu_time_s + (agent_usr_cpu_time_ms/1000000.0)\n + agent_sys_cpu_time_s + (agent_sys_cpu_time_ms/1000000.0)) as float) as total_cpu_s\n from sysibmadm.snapappl\n group by member\n )\n\n select\n i.snapshot_timestamp\n , i.member\n , i.agent_id as handle\n , i.appl_id\n , i.sequence_no\n , i.primary_auth_id as auth_id\n , i.appl_status as AppState\n , i.appl_name as prog_name\n , (agent_usr_cpu_time_s + (agent_usr_cpu_time_ms/1000000.0)\n + agent_sys_cpu_time_s + (agent_sys_cpu_time_ms/1000000.0)) as cpu_s\n , a.rows_read as rr, a.rows_written as rw\n , total_cpu_s\n from sysibmadm.snapappl_info i, sysibmadm.snapappl a, total_cpu c\n where i.agent_id = a.agent_id\n and i.member = a.member\n and a.member = c.member\n and c.total_cpu_s > 0\n \"\"\".format(dgt_schema = dgt_schema, dgt_name = dgt_name)\n\n member_clause = \"\"\n if member:\n member_clause = \" where member = {member} \".format(member = member)\n\n snapappl_delta = \"\"\"\n select handle, auth_id, appstate, prog_name\n , cast (100 * (sum(cpu_s) / sum(total_cpu_s)) as dec(5,2)) as \"CPU%\"\n , int(sum(rr)/max(tm_s)) as \"RR/s\"\n , int(sum(rw)/max(tm_s)) as \"RW/s\"\n from (\n select handle, auth_id, appstate, prog_name\n , cpu_s - lag(cpu_s) over(partition by appl_id, sequence_no, member order by snapshot_timestamp) as cpu_s\n , rr - lag(rr) over (partition by appl_id, sequence_no, member order by snapshot_timestamp) as rr\n , rw - lag(rw) over (partition by appl_id, sequence_no, member order by snapshot_timestamp) as rw\n , total_cpu_s - lag(total_cpu_s) over (partition by appl_id, sequence_no, member order by snapshot_timestamp) as total_cpu_s\n , timestampdiff(2, CHAR(snapshot_timestamp - lag(snapshot_timestamp) \n over (partition by appl_id, sequence_no, member order by snapshot_timestamp))) as tm_s\n from {dgt_schema}.{dgt_name}\n {member_clause}\n ) x\n where x.total_cpu_s > 0\n and x.tm_s > 0\n group by handle, auth_id, appstate, prog_name\n order by \"CPU%\" desc\n \"\"\".format(dgt_schema = dgt_schema, dgt_name = dgt_name, member_clause = member_clause)\n\n if not self._check_table(dgt_schema, dgt_name):\n self._create_table(dgt_ddl)\n self.query_db(snapappl_insert)\n\n self.query_db(snapappl_insert)\n cols = ['Handle', 'AuthID', 'AppState', 'ProgNm', 'CPU%', 'RR/s', 'RW/s']\n rows = self.query_db(snapappl_delta)\n self.query_db(snapappl_delete)\n\n return cols, rows\n","sub_path":"app/db2/db2sql.py","file_name":"db2sql.py","file_ext":"py","file_size_in_byte":7165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"346939198","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\n\nfrom apps.student.models import StudentProxyModel, ElectivePriority\n\nUser = get_user_model()\n\nSTUDENT_GROUP_NAME = 'STUDENT'\n\n\n@receiver(post_save, sender=StudentProxyModel)\ndef create_student_account(sender, instance, created, *args, **kwargs):\n if created:\n instance.user_type = 'Student'\n instance.is_staff = True\n instance.save()\n student_group, created = Group.objects.get_or_create(name=STUDENT_GROUP_NAME)\n instance.groups.add(student_group)\n\n\n@receiver(pre_save, sender=ElectivePriority)\ndef manage_priority_sememter(sender, instance, *args, **kwargs):\n if instance.student.current_semester is not None:\n instance.session = instance.student.current_semester\n","sub_path":"apps/student/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"554286904","text":"\"\"\"Heterograph NN modules\"\"\"\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n__all__ = [\"HeteroGraphConv\"]\n\n\nclass HeteroGraphConv(layers.Layer):\n r\"\"\"A generic module for computing convolution on heterogeneous graphs.\n\n The heterograph convolution applies sub-modules on their associating\n relation graphs, which reads the features from source nodes and writes the\n updated ones to destination nodes. If multiple relations have the same\n destination node types, their results are aggregated by the specified method.\n If the relation graph has no edge, the corresponding module will not be called.\n\n Pseudo-code:\n\n .. code::\n\n outputs = {nty : [] for nty in g.dsttypes}\n # Apply sub-modules on their associating relation graphs in parallel\n for relation in g.canonical_etypes:\n stype, etype, dtype = relation\n dstdata = relation_submodule(g[relation], ...)\n outputs[dtype].append(dstdata)\n\n # Aggregate the results for each destination node type\n rsts = {}\n for ntype, ntype_outputs in outputs.items():\n if len(ntype_outputs) != 0:\n rsts[ntype] = aggregate(ntype_outputs)\n return rsts\n\n Examples\n --------\n\n Create a heterograph with three types of relations and nodes.\n\n >>> import dgl\n >>> g = dgl.heterograph({\n ... ('user', 'follows', 'user') : edges1,\n ... ('user', 'plays', 'game') : edges2,\n ... ('store', 'sells', 'game') : edges3})\n\n Create a ``HeteroGraphConv`` that applies different convolution modules to\n different relations. Note that the modules for ``'follows'`` and ``'plays'``\n do not share weights.\n\n >>> import dgl.nn.pytorch as dglnn\n >>> conv = dglnn.HeteroGraphConv({\n ... 'follows' : dglnn.GraphConv(...),\n ... 'plays' : dglnn.GraphConv(...),\n ... 'sells' : dglnn.SAGEConv(...)},\n ... aggregate='sum')\n\n Call forward with some ``'user'`` features. This computes new features for both\n ``'user'`` and ``'game'`` nodes.\n\n >>> import tensorflow as tf\n >>> h1 = {'user' : tf.random.normal((g.num_nodes('user'), 5))}\n >>> h2 = conv(g, h1)\n >>> print(h2.keys())\n dict_keys(['user', 'game'])\n\n Call forward with both ``'user'`` and ``'store'`` features. Because both the\n ``'plays'`` and ``'sells'`` relations will update the ``'game'`` features,\n their results are aggregated by the specified method (i.e., summation here).\n\n >>> f1 = {'user' : ..., 'store' : ...}\n >>> f2 = conv(g, f1)\n >>> print(f2.keys())\n dict_keys(['user', 'game'])\n\n Call forward with some ``'store'`` features. This only computes new features\n for ``'game'`` nodes.\n\n >>> g1 = {'store' : ...}\n >>> g2 = conv(g, g1)\n >>> print(g2.keys())\n dict_keys(['game'])\n\n Call forward with a pair of inputs is allowed and each submodule will also\n be invoked with a pair of inputs.\n\n >>> x_src = {'user' : ..., 'store' : ...}\n >>> x_dst = {'user' : ..., 'game' : ...}\n >>> y_dst = conv(g, (x_src, x_dst))\n >>> print(y_dst.keys())\n dict_keys(['user', 'game'])\n\n Notes\n -----\n\n HeteroGraphConv requires that there is a module for every ``'etype'`` in an input graph.\n If you want to apply HeteroGraphConv to a subset of a graph's ``'etypes'``, you must\n create a new graph using for example :func:`~dgl.edge_type_subgraph()`.\n\n Parameters\n ----------\n mods : dict[str, nn.Module]\n Modules associated with every edge types. 
The forward function of each\n module must have a `DGLGraph` object as the first argument, and\n its second argument is either a tensor object representing the node\n features or a pair of tensor object representing the source and destination\n node features.\n aggregate : str, callable, optional\n Method for aggregating node features generated by different relations.\n Allowed string values are 'sum', 'max', 'min', 'mean', 'stack'.\n The 'stack' aggregation is performed along the second dimension, whose order\n is deterministic.\n User can also customize the aggregator by providing a callable instance.\n For example, aggregation by summation is equivalent to the follows:\n\n .. code::\n\n def my_agg_func(tensors, dsttype):\n # tensors: is a list of tensors to aggregate\n # dsttype: string name of the destination node type for which the\n # aggregation is performed\n stacked = tf.stack(tensors, axis=0)\n return tf.reduce_sum(stacked, axis=0)\n\n Attributes\n ----------\n mods : dict[str, nn.Module]\n Modules associated with every edge types.\n \"\"\"\n\n def __init__(self, mods, aggregate=\"sum\"):\n super(HeteroGraphConv, self).__init__()\n self.mods = mods\n # Do not break if graph has 0-in-degree nodes.\n # Because there is no general rule to add self-loop for heterograph.\n for _, v in self.mods.items():\n set_allow_zero_in_degree_fn = getattr(\n v, \"set_allow_zero_in_degree\", None\n )\n if callable(set_allow_zero_in_degree_fn):\n set_allow_zero_in_degree_fn(True)\n if isinstance(aggregate, str):\n self.agg_fn = get_aggregate_fn(aggregate)\n else:\n self.agg_fn = aggregate\n\n def call(self, g, inputs, mod_args=None, mod_kwargs=None):\n \"\"\"Forward computation\n\n Invoke the forward function with each module and aggregate their results.\n\n Parameters\n ----------\n g : DGLGraph\n Graph data.\n inputs : dict[str, Tensor] or pair of dict[str, Tensor]\n Input node features.\n mod_args : dict[str, tuple[any]], optional\n Extra positional arguments for the sub-modules.\n mod_kwargs : dict[str, dict[str, any]], optional\n Extra key-word arguments for the sub-modules.\n\n Returns\n -------\n dict[str, Tensor]\n Output representations for every types of nodes.\n \"\"\"\n if mod_args is None:\n mod_args = {}\n if mod_kwargs is None:\n mod_kwargs = {}\n outputs = {nty: [] for nty in g.dsttypes}\n if isinstance(inputs, tuple):\n src_inputs, dst_inputs = inputs\n for stype, etype, dtype in g.canonical_etypes:\n rel_graph = g[stype, etype, dtype]\n if stype not in src_inputs or dtype not in dst_inputs:\n continue\n dstdata = self.mods[etype](\n rel_graph,\n (src_inputs[stype], dst_inputs[dtype]),\n *mod_args.get(etype, ()),\n **mod_kwargs.get(etype, {})\n )\n outputs[dtype].append(dstdata)\n else:\n for stype, etype, dtype in g.canonical_etypes:\n rel_graph = g[stype, etype, dtype]\n if stype not in inputs:\n continue\n dstdata = self.mods[etype](\n rel_graph,\n (inputs[stype], inputs[dtype]),\n *mod_args.get(etype, ()),\n **mod_kwargs.get(etype, {})\n )\n outputs[dtype].append(dstdata)\n rsts = {}\n for nty, alist in outputs.items():\n if len(alist) != 0:\n rsts[nty] = self.agg_fn(alist, nty)\n return rsts\n\n\ndef get_aggregate_fn(agg):\n \"\"\"Internal function to get the aggregation function for node data\n generated from different relations.\n\n Parameters\n ----------\n agg : str\n Method for aggregating node features generated by different relations.\n Allowed values are 'sum', 'max', 'min', 'mean', 'stack'.\n\n Returns\n -------\n callable\n Aggregator function that takes a list of 
tensors to aggregate\n and returns one aggregated tensor.\n \"\"\"\n if agg == \"sum\":\n fn = tf.reduce_sum\n elif agg == \"max\":\n fn = tf.reduce_max\n elif agg == \"min\":\n fn = tf.reduce_min\n elif agg == \"mean\":\n fn = tf.reduce_mean\n elif agg == \"stack\":\n fn = None # will not be called\n else:\n raise DGLError(\n \"Invalid cross type aggregator. Must be one of \"\n '\"sum\", \"max\", \"min\", \"mean\" or \"stack\". But got \"%s\"' % agg\n )\n if agg == \"stack\":\n\n def stack_agg(inputs, dsttype): # pylint: disable=unused-argument\n if len(inputs) == 0:\n return None\n return tf.stack(inputs, axis=1)\n\n return stack_agg\n else:\n\n def aggfn(inputs, dsttype): # pylint: disable=unused-argument\n if len(inputs) == 0:\n return None\n stacked = tf.stack(inputs, axis=0)\n return fn(stacked, axis=0)\n\n return aggfn\n","sub_path":"python/dgl/nn/tensorflow/hetero.py","file_name":"hetero.py","file_ext":"py","file_size_in_byte":8950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"623316424","text":"from adaboost import *\r\nfrom utils import *\r\n\r\n\r\nclass CascadeAdaBoost:\r\n def __init__(self, const_f, const_d, total_fpr, total_dr):\r\n self.const_f = const_f\r\n self.const_d = const_d\r\n self.total_fpr = total_fpr\r\n self.total_dr = total_dr\r\n self.adaboosts = []\r\n self.FPRs = []\r\n self.DRs = []\r\n self.idxs_feature_stumps = None\r\n\r\n def fit(self, X, y, path_save=None):\r\n x_train, y_train = X, y\r\n if len(self.adaboosts) != 0:\r\n general_fpr = np.prod(np.array(self.FPRs))\r\n general_dr = np.prod(np.array(self.DRs))\r\n else:\r\n general_fpr = 1\r\n general_dr = 1\r\n k = len(self.adaboosts)\r\n while general_fpr > self.total_fpr:\r\n ab = AdaBoost()\r\n weights = np.array([1 / len(x_train) for i in range(len(x_train))])\r\n adaboost_completed = False\r\n thr = 0\r\n thr_step = 0.01\r\n i = 0\r\n while not adaboost_completed:\r\n print(f'Classif {i}')\r\n weights = ab.help_train(x_train, y_train, weights)\r\n y_pred, prob = ab.predict(x_train, thr)\r\n dr = detection_rate(y_pred, y_train)\r\n while self.const_d > dr:\r\n thr -= thr_step\r\n y_pred, prob = ab.predict(x_train, thr)\r\n dr = detection_rate(y_pred, y_train)\r\n fpr = false_positive_rate(y_pred, y_train)\r\n print(f'FPR={fpr} DR={dr}')\r\n i += 1\r\n if self.const_f > fpr:\r\n adaboost_completed = True\r\n ab.thr = thr\r\n\r\n self.adaboosts.append(ab)\r\n y_pred, prob = ab.predict(x_train, ab.thr)\r\n fpr = false_positive_rate(y_pred, y_train)\r\n self.FPRs.append(fpr)\r\n dr = detection_rate(y_pred, y_train)\r\n self.DRs.append(dr)\r\n\r\n if path_save is not None:\r\n save(path_save, f'ab_{k}_block', ab)\r\n general_fpr = np.prod(np.array(self.FPRs))\r\n\r\n print(f'General FPR {general_fpr} {k}')\r\n k += 1\r\n y_pred, prob = ab.predict(x_train, ab.thr)\r\n mask = np.logical_or(np.logical_and(y_pred == 1, y_train == -1), y_train == 1)\r\n print(f'Sample train before: {len(y_train)} Class 1:{np.sum(y_train==1)} Class 0:{np.sum(y_train==-1)}')\r\n x_train = x_train[mask]\r\n y_train = y_train[mask]\r\n print(f'Sample train before: {len(y_train)} Class 1:{np.sum(y_train==1)} Class 0:{np.sum(y_train==-1)}')\r\n\r\n def predict(self, X):\r\n prob_general = 1\r\n for i in range(len(self.adaboosts)):\r\n y_pred, prob = self.adaboosts[i].predict(X, self.adaboosts[i].thr)\r\n prob_general *= prob\r\n if y_pred == -1:\r\n break\r\n return y_pred, prob_general\r\n\r\n def get_idxs_feature_stumps(self):\r\n self.idxs_feature_stumps = []\r\n for i in range(len(self.adaboosts)):\r\n self.idxs_feature_stumps.extend(self.adaboosts[i].get_idxs_feature_stumps())\r\n self.idxs_feature_stumps = set(self.idxs_feature_stumps)\r\n return self.idxs_feature_stumps\r\n\r\n def load_adaboosts(self, loading_path, X=None, y=None):\r\n dirs = os.listdir(loading_path)\r\n for i in range(1, len(dirs) + 1):\r\n self.adaboosts.append(load(loading_path, f'ab_{i}_block'))\r\n print(f'Add {i} block of cascade. Numbers of WCL: {len(self.adaboosts[-1].alphs)}')\r\n if X is not None and y is not None:\r\n y_pred, _ = self.adaboosts[-1].predict(X)\r\n self.DRs.append(detection_rate(y_pred, y))\r\n self.FPRs.append(false_positive_rate(y_pred, y))\r\n # self.adaboosts[-1].thr = 6\r\n self.get_idxs_feature_stumps()\r\n\r\n","sub_path":"cascade_adaboost.py","file_name":"cascade_adaboost.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"639973174","text":"import torch\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\nimport torch.nn.functional as F\nfrom efficientnet_pytorch import EfficientNet\nfrom torch import nn\nfrom PIL import Image\nfrom albumentations import (Compose, Resize, Normalize)\nfrom albumentations.pytorch.transforms import ToTensor\n\nimport os\nimport numpy as np\nfrom typing import List\nimport math\n\n\nIMAGE_SIZE = 224\nPATH_TO_WEIGHTS = 'models/EfficientNet_car_model_classification/model_e4.pth'\n\n\nclass LazyLoadModule(nn.Module):\n \"\"\"Lazy buffer/parameter loading using load_state_dict_pre_hook\n\n Define all buffer/parameter in `_lazy_buffer_keys`/`_lazy_parameter_keys` and\n save buffer with `register_buffer`/`register_parameter`\n method, which can be outside of __init__ method.\n Then this module can load any shape of Tensor during de-serializing.\n\n Note that default value of lazy buffer is torch.Tensor([]), while lazy parameter is None.\n \"\"\"\n _lazy_buffer_keys: List[str] = [] # It needs to be override to register lazy buffer\n _lazy_parameter_keys: List[str] = [] # It needs to be override to register lazy parameter\n\n def __init__(self):\n super(LazyLoadModule, self).__init__()\n for k in self._lazy_buffer_keys:\n self.register_buffer(k, torch.tensor([]))\n for k in self._lazy_parameter_keys:\n self.register_parameter(k, None)\n self._register_load_state_dict_pre_hook(self._hook)\n\n def _hook(self, state_dict, prefix, local_metadata, strict, missing_keys,\n unexpected_keys, error_msgs):\n for key in self._lazy_buffer_keys:\n self.register_buffer(key, state_dict[prefix + key])\n\n for key in self._lazy_parameter_keys:\n self.register_parameter(key, Parameter(state_dict[prefix + key]))\n\n\nclass LazyLinear(LazyLoadModule):\n \"\"\"Linear module with lazy input inference\n\n `in_features` can be `None`, and it is determined at the first time of forward step dynamically.\n \"\"\"\n\n __constants__ = ['bias', 'in_features', 'out_features']\n _lazy_parameter_keys = ['weight']\n\n def __init__(self, in_features, out_features, bias=True):\n super(LazyLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n if bias:\n self.bias = Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n\n if in_features is not None:\n self.weight = Parameter(torch.Tensor(out_features, in_features))\n self.reset_parameters()\n\n def reset_parameters(self):\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, input):\n if self.weight is None:\n self.in_features = input.shape[-1]\n self.weight = Parameter(torch.Tensor(self.out_features, self.in_features))\n self.reset_parameters()\n\n # Need to send lazy defined parameter to device...\n self.to(input.device)\n return F.linear(input, self.weight, self.bias)\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.bias is not None\n )\n\nclass CarsRecognizer(nn.Module):\n def __init__(self, n_classes):\n super(CarsRecognizer, self).__init__()\n self.backbone = EfficientNet.from_name('efficientnet-b5')\n self.avg_pooling = nn.AdaptiveAvgPool2d(1)\n self.dropout = nn.Dropout(0.15)\n self.linear = LazyLinear(in_features=None, out_features=n_classes)\n\n def forward(self, img):\n features = 
self.backbone.extract_features(img)\n features = self.avg_pooling(features).squeeze()\n features = self.dropout(features)\n logits = self.linear(features)\n return logits\n\ntest_transform = Compose([Resize(IMAGE_SIZE, IMAGE_SIZE), Normalize(), ToTensor()])\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nmodel = CarsRecognizer(n_classes=11)\ncp = torch.load(PATH_TO_WEIGHTS, map_location=device)\nmodel.load_state_dict(cp['state_dict'])\n\ndef predict(image_path):\n id2cartype = {0: 'MAZDA_3_B',\n 1: 'КIА_RIО_B',\n 2: 'VОLКSWАGЕN_РОLО_B',\n 3: 'VOLVO_ALLVOLVO_C',\n 4: 'НУUNDАI_SОLАRIS_B',\n 5: 'LADA_PRIORA_B',\n 6: 'VОLКSWАGЕN_TIGUAN_B',\n 7: 'KAMAZ_ALLKAMAZ_C',\n 8: 'TOYOTA_RАV4_B',\n 9: 'SCANIA_ALLSCANIA_C',\n 10: 'RЕNАULТ_DUSТЕR_B'}\n image = np.asarray(Image.open(image_path).convert('RGB'))\n image_tensor = test_transform(image=image)['image']\n image_tensor = image_tensor.unsqueeze(0)\n\n model.eval()\n with torch.no_grad():\n logits = model(image_tensor).squeeze()\n pred = torch.argmax(logits).item()\n\n res = id2cartype[pred].split('_')\n return res[0], res[1], res[2]","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"162372287","text":"import time, asyncio\n\n\n@asyncio.coroutine\ndef task1():\n print('task1 start work-----')\n yield from asyncio.sleep(2)\n print('task1 finish the task')\n return task1.__name__\n\n\n@ asyncio.coroutine\ndef task2():\n print('task2 start work-----')\n yield from asyncio.sleep(5)\n print('task1 finish the task')\n return task2.__name__\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n tasks = [task1(), task2()]\n loop.run_until_complete(asyncio.wait(tasks)) # 任务2没完成时, 进行任务1\n loop.close()","sub_path":"MACHINE/ThreadAndProcecss/SynergeticProcess/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"240866979","text":"from typing import List, Tuple\n\nimport cv2\nimport numpy as np\nfrom imutils.object_detection import non_max_suppression\nfrom mmif import Mmif, View, DocumentTypes, AnnotationTypes\n\nBOX_MIN_CONF = 0.1\nSAMPLE_RATIO = 30\nnet = cv2.dnn.readNet(\"frozen_east_text_detection.pb\")\n\n\ndef process_image(f):\n return f\n\n\ndef decode_predictions(scores, geometry, box_min_conf=BOX_MIN_CONF):\n \"\"\"\n Taken from pyimagesearch, convert results to rectangles and confidences\n \"\"\"\n\n # grab the number of rows and columns from the scores volume, then\n # initialize our set of bounding box rectangles and corresponding\n # confidence scores\n (numRows, numCols) = scores.shape[2:4]\n rects = []\n confidences = []\n\n # loop over the number of rows\n for y in range(0, numRows):\n # extract the scores (probabilities), followed by the\n # geometrical data used to derive potential bounding box\n # coordinates that surround text\n scoresData = scores[0, 0, y]\n xData0 = geometry[0, 0, y]\n xData1 = geometry[0, 1, y]\n xData2 = geometry[0, 2, y]\n xData3 = geometry[0, 3, y]\n anglesData = geometry[0, 4, y]\n\n # loop over the number of columns\n for x in range(0, numCols):\n # if our score does not have sufficient probability,\n # ignore it\n if scoresData[x] < box_min_conf:\n continue\n\n # compute the offset factor as our resulting feature\n # maps will be 4x smaller than the input image\n (offsetX, offsetY) = (x * 4.0, y * 4.0)\n\n # extract the rotation angle for the prediction and\n # then compute the sin and cosine\n angle = anglesData[x]\n cos = np.cos(angle)\n sin = np.sin(angle)\n\n # use the geometry volume to derive the width and height\n # of the bounding box\n h = xData0[x] + xData2[x]\n w = xData1[x] + xData3[x]\n\n # compute both the starting and ending (x, y)-coordinates\n # for the text prediction bounding box\n endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))\n endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))\n startX = int(endX - w)\n startY = int(endY - h)\n\n # add the bounding box coordinates and probability score\n # to our respective lists\n rects.append((startX, startY, endX, endY))\n confidences.append(scoresData[x])\n\n # return a tuple of the bounding boxes and associated confidences\n return rects, confidences\n\n\ndef image_to_east_boxes(image: np.array) -> List[Tuple[int, int, int, int]]:\n (newW, newH) = (320, 320) # newH and newW must a multiple of 32.\n (H, W) = image.shape[:2]\n rW = W / float(newW)\n rH = H / float(newH)\n\n # resize the frame, this time ignoring aspect ratio\n image = cv2.resize(image, (newW, newH))\n\n # construct a blob from the frame and then perform a forward pass\n # of the model to obtain the two output layer sets\n blob = cv2.dnn.blobFromImage(\n image, 1.0, (newW, newH), (123.68, 116.78, 103.94), swapRB=True, crop=False\n )\n net.setInput(blob)\n layerNames = [\"feature_fusion/Conv_7/Sigmoid\", \"feature_fusion/concat_3\"]\n (scores, geometry) = net.forward(layerNames)\n (rects, confidences) = decode_predictions(scores, geometry)\n boxes = non_max_suppression(np.array(rects), probs=confidences)\n box_list = []\n for (startX, startY, endX, endY) in boxes:\n # scale the bounding box coordinates based on the respective\n # ratios\n startX = int(startX * rW)\n startY = int(startY * rH)\n endX = int(endX * rW)\n endY = int(endY * rH)\n box_list.append((startX, startY, endX, endY))\n return box_list\n\ndef get_target_frame_numbers(mmif, frame_type, frames_per_segment=2):\n def 
convert_msec(time_msec):\n import math\n return math.floor(time_msec * 29.97) # todo 6/1/21 kelleylynch assuming frame rate\n\n views_with_tframe = [\n tf_view\n for tf_view in mmif.get_all_views_contain(AnnotationTypes.TimeFrame)\n if tf_view.get_annotations(AnnotationTypes.TimeFrame, frameType=frame_type)\n ]\n frame_number_ranges = [\n (tf_annotation.properties[\"start\"], tf_annotation.properties[\"end\"])\n if tf_view.metadata.get_parameter(\"timeUnit\") in [\"frames\", \"frame\"]\n else (convert_msec(tf_annotation.properties[\"start\"]), convert_msec(tf_annotation.properties[\"end\"]))\n for tf_view in views_with_tframe\n for tf_annotation in tf_view.get_annotations(AnnotationTypes.TimeFrame, frameType=frame_type)\n ]\n target_frames = list(set([int(f) for start, end in frame_number_ranges\n for f in np.linspace(start, end, frames_per_segment, dtype=int)]))\n\n return target_frames\n\ndef boxes_from_target_frames(target_frames:List[int], cap:cv2.VideoCapture, new_view:View):\n for frame_number in target_frames:\n cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)\n _, f = cap.read()\n result_list = image_to_east_boxes(f)\n for box in result_list:\n bb_annotation = new_view.new_annotation(AnnotationTypes.BoundingBox)\n bb_annotation.add_property(\"boxType\", \"text\")\n x0, y0, x1, y1 = box\n bb_annotation.add_property(\n \"coordinates\", [[x0, y0], [x1, y0], [x0, y1], [x1, y1]]\n )\n bb_annotation.add_property(\"frame\", frame_number)\n \n\ndef run_EAST_video(mmif: Mmif, new_view: View, **kwargs) -> Mmif:\n cap = cv2.VideoCapture(mmif.get_document_location(DocumentTypes.VideoDocument))\n counter = 0\n idx = 0\n if \"stopAt\" in kwargs:\n stop_at = int(kwargs[\"stopAt\"])\n else:\n stop_at = 30*60*60*5 #five hours\n if \"frameType\" in kwargs:\n frame_type = kwargs[\"frameType\"]\n else:\n frame_type = \"\"\n target_frames = []\n if frame_type:\n target_frames = get_target_frame_numbers(mmif, frame_type, 2)\n boxes_from_target_frames(target_frames, cap, new_view)\n else:\n while cap.isOpened():\n if counter > stop_at:\n break\n ret, f = cap.read()\n if target_frames:\n if counter not in target_frames:\n counter += 1 #todo move this\n continue\n if not ret:\n break\n if (counter % SAMPLE_RATIO == 0) or (counter in target_frames):\n result_list = image_to_east_boxes(f)\n for box in result_list:\n idx += 1\n bb_annotation = new_view.new_annotation(AnnotationTypes.BoundingBox)\n bb_annotation.add_property(\"boxType\", \"text\")\n x0, y0, x1, y1 = box\n bb_annotation.add_property(\n \"coordinates\", [[x0, y0], [x1, y0], [x0, y1], [x1, y1]]\n )\n bb_annotation.add_property(\"frame\", counter)\n counter += 1\n return mmif\n\n\ndef run_EAST_image(mmif: Mmif, new_view:View) -> Mmif:\n image = cv2.imread(mmif.get_document_location(DocumentTypes.ImageDocument))\n box_list = image_to_east_boxes(image)\n for idx, box in enumerate(box_list):\n annotation = new_view.new_annotation(f\"td{idx}\", AnnotationTypes.BoundingBox)\n annotation.add_property(\"boxType\", \"text\")\n x0, y0, x1, y1 = box\n annotation.add_property(\n \"coordinates\", [[x0, y0], [x1, y0], [x0, y1], [x1, y1]]\n )\n return mmif\n","sub_path":"east_utils.py","file_name":"east_utils.py","file_ext":"py","file_size_in_byte":7626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"467586083","text":"id_video = []\n\nvideo_name = []\nfor index, row in category.iterrows():\n x = row['items']\n \n id_video.append(x['id'])\n \n video_name.append(x['snippet']['title'])\n\ncategory = pd.DataFrame(zip(id_video, video_name), columns = ['category_id', 'category'])\n\ncategory['category_id'] = category['category_id'].astype('int64')\n\n#Instead of maintaining two different datasets, I merged both into one by adding categories into the original 'data' dataset.\ndata = pd.merge(data, category, on = 'category_id', how = 'inner')\n\ndata['trending_date'] = pd.to_datetime(data['trending_date'], \n format = '%y.%d.%m')\n\ndata.insert(4, 'publish_date', \n data['publish_time'].dt.date)\n\ndata['publish_time'] = data['publish_time'].dt.time\n","sub_path":"data/video_cat.py","file_name":"video_cat.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"507658558","text":"# Plot 50 each sine normal and abnormal pattern\n# --------------------------------------------------------------#\n# Plot just 1 sine wave of 1000 points and 10 sec duration so\n# sampling time interval will be 10/1000 = .01 sec\n# So sampling freq will be 1/.01 = 100 Hz\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nimport os\nfrom sklearn.metrics import f1_score, confusion_matrix, accuracy_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import precision_score\nimport numpy as np\nfrom scipy import signal as sg\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Step 1 - Draw 50 normal pattern\n# --------------------------------------------------------------#\n\nfi = 0.5 # Inherent freq 1 so 1 cycle time is 1/1 = 1 sec so 10 sec = 10 cycles\nt = 10\nfs = 100 # Sampling freq 100 so sampling time interval is 0.01\nsample_points = 1000\nts = 0.01\na = 2\n\n# 1000 Signal values as a function of 1000 time values\ntime = np.arange(0, 10, 0.01)\n\n# Plot 1 sine pattern\nsig1_sine = a * np.sin(2 * np.pi * fi * time)\n# Create normal plots also with different a and f values?\n\n# Introduce randomness to data\nnoise = np.random.normal(0, .1, sig1_sine.shape)\nnew_sig1_sine = sig1_sine + noise\nprint(new_sig1_sine)\nsine_pattern = new_sig1_sine.copy()\n\n# Crate 50 patterns\n\nfor n in range(49):\n new_row = a * np.sin(2 * np.pi * fi * time)\n noise1 = np.random.normal(0, .1, new_row.shape)\n new_pattern = new_row + noise1\n sine_pattern = np.vstack([sine_pattern, new_pattern])\n\n# Plot this pattern (Change ndarray to df to plot)\n# Change ndarray to df\ndf = pd.DataFrame(sine_pattern)\n\n# Single Plot\ndf.iloc[0].plot()\nplt.show()\n\n# # Put all images together to see patterns of normal synthetic sine wave\n# from PIL import Image\n# # All 50 normal plots\n# images_list = []\n# for x in range(49):\n# f = plt.figure()\n# df.iloc[x].plot()\n# plt.show()\n# # f.savefig('Sine Normal Pattern' + '.pdf', bbox_inches='tight')\n# f.savefig('/Users/sylviachadha/Desktop/Tools/PyCharm/thesis_poc/normal_images/' + str(x))\n# image1 = Image.open(r'/Users/sylviachadha/Desktop/Tools/PyCharm/thesis_poc/normal_images/' + str(x) + '.png')\n# im1 = image1.convert('RGB')\n# images_list.append(im1)\n# im1.save(r'/Users/sylviachadha/Desktop/Tools/PyCharm/thesis_poc/normal_images/normal_images.pdf',save_all=True, append_images=images_list)\n#\n\n# Step 2 - Draw 50 abnormal random pattern\n# --------------------------------------------------------------#\n\ndf_a = df.copy()\n\ntot_rows = len(df_a.index)\nprint(tot_rows)\n\nanomalydf = pd.DataFrame()\nfor index, row in df_a.iterrows():\n print(row)\n r = random.randint(0, 850)\n print(r)\n index_list = [range(r, r + 100, 1)]\n print(index_list)\n distort_f_index = r\n print(distort_f_index)\n distort_l_index = r+100\n print(distort_l_index)\n n = 50\n r = random.randint(-3, 3)\n r1 = random.randint(-3, 3)\n\n x1 = [r] * n\n x2 = [r1] * n\n x1_arr = np.array(x1)\n x2_arr = np.array(x2)\n\n noise1 = np.random.normal(0, .1, x1_arr.shape)\n noise2 = np.random.normal(0, .1, x2_arr.shape)\n\n new_x1 = x1_arr + noise1\n new_x2 = x2_arr + noise2\n\n df_arr1 = pd.DataFrame(new_x1, columns=['Signal'])\n df_arr2 = pd.DataFrame(new_x2, columns=['Signal'])\n frame = [df_arr1, df_arr2]\n df_distort2 = pd.concat(frame)\n print(df_distort2)\n df_distort2.index = np.arange(start=distort_f_index, stop=distort_l_index, step=1)\n\n 
dfn1 = row.iloc[0:distort_f_index].to_frame(name=\"Signal\")\n dfn3 = row.iloc[distort_l_index:].to_frame(name=\"Signal\")\n\n # Concatenate dfn1 + df_distort1 + dfn3\n frames = [dfn1, df_distort2, dfn3]\n new_row = pd.concat(frames)\n new_row = new_row.transpose()\n anomalydf = anomalydf.append(new_row, ignore_index=True)\n\n\n# Single Plot\nanomalydf.iloc[10].plot()\nplt.show()\n\n# # All Plots - Put all images together to see patterns of abnormal synthetic sine wave\n# from PIL import Image\n# # All 50 abnormal plots\n# imagesa_list = []\n# for x in range(tot_rows):\n# f = plt.figure()\n# anomalydf.iloc[x].plot()\n# plt.show()\n# # f.savefig('Sine Normal Pattern' + '.pdf', bbox_inches='tight')\n# f.savefig('/Users/sylviachadha/Desktop/Tools/PyCharm/thesis_poc/anomaly_images/' + str(x))\n# image2 = Image.open(r'/Users/sylviachadha/Desktop/Tools/PyCharm/thesis_poc/anomaly_images/' + str(x) + '.png')\n# im2 = image2.convert('RGB')\n# imagesa_list.append(im2)\n# im2.save(r'/Users/sylviachadha/Desktop/Tools/PyCharm/thesis_poc/anomaly_images/anomaly_images.pdf',save_all=True, append_images=imagesa_list)\n\n\n# Apply ML Algorithms for Supervised learning\n# df has 50 normal patterns\n# anomalydf has 50 abnormal patterns\n# Most ML Algorithms accept ndarray instead of dataframe\n\n\n# AIM - To run ML Supervised algorithms on Sine\n# normal and abnormal data\n\n# ------------------------------\n# STEP 1 Change df to ndarray\n# ------------------------------\ndf_arr = df.to_numpy()\nanomalydf_arr = anomalydf.to_numpy()\n\n# Concatenate total data (normal + abnormal)\nX_2D = np.concatenate((df_arr, anomalydf_arr))\n\n\n# -------------------------------------------\n# STEP 2 See plots to verify 2D Input data\n# -------------------------------------------\n# Change to df to see plot to verify normal & abnormal\nX_2D_df = pd.DataFrame(X_2D)\n\n# Can see sine wave from index 0 to 49 and triangle from\n# index 50 to 99\nX_2D_df1 = X_2D_df.iloc[52]\nplt.plot(X_2D_df1)\nplt.show()\n\n# -------------------------------------------\n# STEP 3 Data to input to ML Algorithms\n# X_2D and y_1D\n# --------------------------------------------\n\n# X_2D as created above\n# y is label\ny_class0 = np.zeros(50)\ny_class1 = np.ones(50)\ny = np.concatenate((y_class0, y_class1))\ny_1D = y\n\n# -------------------------------------------------------------------\n# Step 4 Reduce dataset X_2D(53,1000) and y_1D(53) # Features and labels\n# All Class 0 and only 3 samples of Class 1\n# Use this 94.3% Class 0 and 5.7% Class 1 for Training\n# -------------------------------------------------------------------\n\n# Reduce Abnormal class to just 3 samples by removing 47 observations\n# from tail of X and Y which are currently 100\n\n# ** Need to try different combinations of 3 samples which are\n# ** given for training of anomaly class\n\nn = 47\nX_short = X_2D[:-n, :] # :-n will include all rows starting from 0 upto excluding the last 47 rows\nprint(\"Filtered data\", X_short)\nprint(\"Length of Filtered Data\", len(X_short))\n\n# Remove 47 observations from tail of Y\ny_short = np.delete(y_1D, range(53, 100, 1))\n\n# -------------------------------------------------------------------\n# STEP 5 - Split into Train & Test to feed to DL Algorithm\n# ----------------------------------------------------------\n# Check Shape of data and target/label\nprint(\"Shape of data\", X_short.shape)\nprint(\"Shape of label\", y_short.shape)\n\n# Split into Train and Test\nX_train, X_test, y_train, y_test = train_test_split(X_short, y_short, 
test_size=0.2) # random_state=4)\n\n# print shapes of new X objects\nprint(\"X_train shape\", X_train.shape)\nprint(\"X_test shape\", X_test.shape)\n\n# print shapes of new y objects\nprint(\"y_train shape\", y_train.shape)\nprint(\"y_test shape\", y_test.shape)\n\n\n# -----------------------------------------------------------------#\n# STEP 6 - Feature Scaling, compulsory for deep learning, scale\n# everything all features.\n# fit & transform on train but only transform on test\n# -----------------------------------------------------------------#\nfrom sklearn.preprocessing import StandardScaler\n\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test) # Need to scale again when u augment test data\n\n\n# -----------------------------------------------------------------#\n# STEP 7 - Modelling\n# -----------------------------------------------------------------#\n# Input ready for model as in previous step-\n# X_train_short_V5 and y_train_short_num for TRAINING MODEL - 8579\n# X_test_short_V5 and y_test_short_num for TESTING MODEL - 961\n\n# Modelling # 1. Import 2. Instantiate 3. Fit 4. Predict\n# --------------------------------------------------------------\n# Model 1 - ANN Artificial Neural Network\n# --------------------------------------------------------------\n# 1. Import\nimport tensorflow as tf\n\n# 2. Instantiate - Building an ANN (Architecture)\n# Ann will be created as an object of class-(sequential class) which\n# allows to build ann as a sequence of layers [input-hidden-output] as opposed to computational\n# graph (like boltzmann m/c which r neurons connected anyway, not in\n# successive layers)\nann = tf.keras.models.Sequential()\nann.add(tf.keras.layers.Dense(units=5, activation='relu')) # Shallow NN i/p n hidden layer\nann.add(tf.keras.layers.Dense(units=5, activation='relu')) # Now deep NN hidden layer added\nann.add(tf.keras.layers.Dense(units=1, activation='sigmoid')) # o/p layer binary so 1 neuron needed\n\n# 3. Compile and train ANN\nann.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # compile\nann.fit(X_train, y_train, batch_size=30, epochs=20)\n\n# 4. 
Predict\ny_pred_ann = ann.predict(X_test) # Predict\n# Convert predicted probability to binary outcome 0 or 1\ny_pred_ann1 = (y_pred_ann > 0.5)\n# Change y_test_short_num to ndarray\n# y_test_short_num_arr = y_test.to_numpy() not required as y_test already ndarray\nmerged_result = np.concatenate((y_pred_ann1.reshape(len(y_pred_ann1), 1), y_test.reshape(len(y_test), 1)), 1)\ny_pred = merged_result[:, 0]\n\n# STEP 8 - Evaluation Metrics\n# ------------------------------------------------------------------\n\ndef evaluate_model(y_actual, y_pred):\n cm = confusion_matrix(y_actual, y_pred)\n print(cm)\n acc = accuracy_score(y_actual, y_pred)\n recall = recall_score(y_actual, y_pred)\n precision = precision_score(y_actual, y_pred)\n f1score = f1_score(y_actual, y_pred)\n return (cm, acc, recall, precision, f1score)\n\n\n# Model 1 ANN\n\nann_result = evaluate_model(y_test, y_pred)\nprint(\"ANN values for cm, accuracy, recall, precision and f1 score\", ann_result)\n\n# ---------------------------------------------------------------------\n# PART B - TOTAL 53 DATA = 42 TRAIN DATA AND 11 TEST DATA\n# ---------------------------------------------------------------------\n# 10 TEST DATA added from minority class from remaining 47 samples\n# which model has not seen\n# ---------------------------------------------------------------------\n# STEP 1 - Make another test set with more anomaly (class triangle)\n# --------------------------------------------------------------------\n\n# Step 1 Make y\ny_1D_new = y_1D.copy() # original y which has 100 labels\ny_test_xl = y_test.copy()\n\ny_test_xl1 = np.delete(y_1D_new, range(0, 90, 1))\n\ny_test_merged = np.concatenate((y_test_xl, y_test_xl1))\nprint(\"Merged labels of new test set\", y_test_merged)\n\n# Step 2 Make X\nX_2D_new = X_2D.copy()\nX_test_xl = X_test.copy()\n\n# Keep 10 observations from tail of X\nn = 10\nX_2D_new = X_2D_new[-n:, :]\nprint(\"Filtered data\", X_2D_new)\nprint(\"Length of Filtered Data\", len(X_2D_new))\n\n# Merge X_test_xl and new_data_X1_copy\nX_test_merged = np.concatenate((X_test_xl, X_2D_new))\nprint(\"Length of new test set\", len(X_test_merged))\n\nX_test_merged = sc.transform(X_test_merged)\n\n# STEP 2 - Predictions and Evaluation results on enhanced test set\n# -------------------------------------------------------------------\n# Take predictions on augmented test dataset\n\n# Model 1\n# 4. 
Predict\ny_pred_merged_ann = ann.predict(X_test_merged)\ny_pred_merged_ann1 = (y_pred_merged_ann > 0.5)\nmerged_result = np.concatenate((y_pred_merged_ann1.reshape(len(y_pred_merged_ann1), 1), y_test_merged.reshape(len(y_test_merged), 1)), 1)\ny_pred1 = merged_result[:, 0]\n\nann_result1 = evaluate_model(y_test_merged, y_pred1)\nprint(\"ANN values for cm, accuracy, recall, precision and f1 score\", ann_result1)\n\n# ---------------------------------------------------------------------\n# PART C - TOTAL 53 DATA = 42 TRAIN DATA AND 11 TEST DATA\n# ---------------------------------------------------------------------\n# 40 TEST DATA added from minority class from remaining 47 samples\n# which model has not seen\n# ---------------------------------------------------------------------\n# STEP 1 - Make another test set with more anomaly (class triangle)\n# --------------------------------------------------------------------\n\n# Step 1 Make y\ny_1D_new1 = y_1D.copy() # original y which has 100 labels\ny_test_xl1 = y_test.copy()\n\ny_test_xl2 = np.delete(y_1D_new1, range(0, 60, 1))\n\ny_test_merged1 = np.concatenate((y_test_xl1, y_test_xl2))\nprint(\"Merged labels of new test set\", y_test_merged1)\n\n# Step 2 Make X\nX_2D_new1 = X_2D.copy()\nX_test_xl1 = X_test.copy()\n\n# Keep 40 observations from tail of X\nn = 40\nX_2D_new1 = X_2D_new1[-n:, :]\nprint(\"Filtered data\", X_2D_new1)\nprint(\"Length of Filtered Data\", len(X_2D_new1))\n\n# Merge X_test_xl and new_data_X1_copy\nX_test_merged1 = np.concatenate((X_test_xl1, X_2D_new1))\nprint(\"Length of new test set\", len(X_test_merged1))\n\nX_test_merged1 = sc.transform(X_test_merged1)\n\n# STEP 2 - Predictions and Evaluation results on enhanced test set\n# -------------------------------------------------------------------\n# Take predictions on augmented test dataset\n\n# Model 1\n# 4. Predict\ny_pred_merged1_ann = ann.predict(X_test_merged1)\ny_pred_merged1_ann1 = (y_pred_merged1_ann > 0.5)\nmerged1_result = np.concatenate((y_pred_merged1_ann1.reshape(len(y_pred_merged1_ann1), 1), y_test_merged1.reshape(len(y_test_merged1), 1)), 1)\ny_pred2 = merged1_result[:, 0]\n\nann_result2 = evaluate_model(y_test_merged1, y_pred2)\nprint(\"ANN values for cm, accuracy, recall, precision and f1 score\", ann_result2)\n","sub_path":"models/pattern-sine50-ann-imb.py","file_name":"pattern-sine50-ann-imb.py","file_ext":"py","file_size_in_byte":13855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
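A note on the scaling fix applied in PART B and PART C of the record above: StandardScaler statistics must come from the training split only, and already-scaled rows must never be transformed twice. A minimal standalone sketch of that pattern (shapes follow the record; the random arrays are placeholders for real data):

    from sklearn.preprocessing import StandardScaler
    import numpy as np

    sc = StandardScaler()
    X_train_s = sc.fit_transform(np.random.rand(42, 1000))  # fit on the training split only
    X_test_s = sc.transform(np.random.rand(11, 1000))       # reuse the training statistics
    X_extra_s = sc.transform(np.random.rand(10, 1000))      # scale unseen rows the same way
    X_merged = np.concatenate((X_test_s, X_extra_s))        # merge; never re-transform scaled rows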
+{"seq_id":"544244446","text":"import pytest\nfrom spacy_pytorch_transformers import PyTT_Language, PyTT_WordPiecer\nfrom spacy_pytorch_transformers import PyTT_TokenVectorEncoder\n\nMODEL_NAMES = [\"bert-base-uncased\", \"gpt2\", \"xlnet-base-cased\"]\n\n\n@pytest.fixture(scope=\"session\", params=MODEL_NAMES)\ndef name(request):\n return request.param\n\n\n@pytest.fixture(scope=\"session\")\ndef nlp(name):\n p_nlp = PyTT_Language(pytt_name=name)\n p_nlp.add_pipe(p_nlp.create_pipe(\"sentencizer\"))\n p_nlp.add_pipe(PyTT_WordPiecer.from_pretrained(p_nlp.vocab, pytt_name=name))\n p_nlp.add_pipe(PyTT_TokenVectorEncoder.from_pretrained(p_nlp.vocab, name=name))\n return p_nlp\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"27931630","text":"from collections import Counter\nimport sys\n\nfact = [1]\nfor i in range(1, 101):\n fact.append(i * fact[-1])\n\ndef main():\n for line in sys.stdin.readlines():\n line = line.strip()\n n = len(line)\n c = Counter(line)\n\n ans = fact[n]\n for x in c:\n ans //= fact[c[x]]\n\n print(ans)\n\nmain()\n","sub_path":"kattis/anagramcounting.py","file_name":"anagramcounting.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"103508477","text":"#!/usr/bin/env python3\n\nimport cv2\nimport rospy\nimport threading\nimport numpy as np\nfrom sensor_msgs.msg import Image\n\nclass Camera:\n def __init__(self):\n # rospy.init_node('pioneer_camera', anonymous=False)\n \n self.source_image = np.zeros((rospy.get_param(\"/uvc_camera_center_node/width\"), rospy.get_param(\"/uvc_camera_center_node/height\"), 3), np.uint8)\n self.thread_rate = rospy.Rate(1)\n self.thread1_flag = False\n\n self.h_frame = rospy.get_param(\"/uvc_camera_center_node/height\")\n self.w_frame = rospy.get_param(\"/uvc_camera_center_node/width\")\n self.read_frames()\n\n def images_callback(self, img):\n dt = np.dtype(np.uint8)\n dt = dt.newbyteorder('>')\n arr = np.frombuffer(img.data,dtype=dt)\n\n arr = np.reshape(arr,(self.h_frame, self.w_frame ,3))\n self.source_image = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)\n \n def kill_threads(self):\n self.thread1_flag = True\n \n def thread_read_frames(self, stop_thread):\n rospy.Subscriber('/robotis/sensor/camera/image_raw', Image, self.images_callback)\n rospy.spin()\n\n # while True:\n # ## Subscriber\n # rospy.Subscriber('/robotis/sensor/camera/image_raw', Image, self.images_callback)\n # self.thread_rate.sleep()\n # if stop_thread():\n # rospy.loginfo(\"[Camera] Thread killed\")\n # break\n\n def read_frames(self):\n thread1 = threading.Thread(target = self.thread_read_frames, args =(lambda : self.thread1_flag, )) \n thread1.start()","sub_path":"PIONEER-ROBOT/pioneer_vision/src/pioneer_vision/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"446884388","text":"import numpy as np\nfrom numpy import log as Log, sqrt as Sqrt, power as Power, pi as Pi, sinh as sh\n\n\ndef line_segment(a, b):\n def grin_function(x, s, **kwargs):\n x_, s_ = x[0], s[0]\n kappa = kwargs.get('eval_dict').get('kappa')\n result = np.zeros_like(s_)\n idx = np.logical_and(x_ >= a, x_ <= s_)\n result[idx] = (sh(kappa * (x_ - a)) * sh(kappa * (b - s_[idx]))) / (kappa * sh(kappa * (b - a)))\n idx = np.logical_and(x_ >= s_, x_ <= b)\n result[idx] = (sh(kappa * (s_[idx] - a)) * sh(kappa * (b - x_))) / (kappa * sh(kappa * (b - a)))\n return result\n\n return grin_function\n","sub_path":"nles/methods/two_sided_approximation/functions/grin_function/gelmgoltz_operator.py","file_name":"gelmgoltz_operator.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"444491749","text":"import pandas as pd\n\nfrom intcomm import IntComm\n\nif __name__ == \"__main__\":\n\n # change this according to your serial port\n # 0: \"/dev/ttyACM0\"\n # 1: \"/dev/ttyACM1\"\n # 2: \"/dev/ttyACM2\"\n intcomm = IntComm(0)\n all_data = []\n print(\"Start\")\n try:\n while True:\n data = intcomm.get_line()\n print(data)\n if len(data) == 0 or data[0] != \"#\":\n print(\"Invalid data:\", data)\n continue\n\n data = data[1:].split(\",\")\n if len(data) == 10:\n yaw, pitch, roll, gyrox, gyroy, gyroz, accx, accy, accz, emg = data\n all_data.append(\n [yaw, pitch, roll, gyrox, gyroy, gyroz, accx, accy, accz]\n )\n\n except KeyboardInterrupt:\n print(\"terminating program\")\n except Exception:\n print(\"an error occured\")\n\n df = pd.DataFrame(all_data)\n print(df.head())\n df.columns = [\"yaw\", \"pitch\", \"roll\", \"gx\", \"gy\", \"gz\", \"ax\", \"ay\", \"az\"]\n df.to_csv(\"logout3.csv\", sep=\",\")\n","sub_path":"trainingdatacollector2.py","file_name":"trainingdatacollector2.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"403979732","text":"\nimport os\nfrom django.contrib.gis.utils import LayerMapping\nfrom .models import Counties,Stores,Towns,Roads\n\n#Towns Mapping\ntowns_mapping = {\n 'area': 'AREA',\n 'perimeter': 'PERIMETER',\n 'ktowns_field': 'KTOWNS_',\n 'ktowns_id': 'KTOWNS_ID',\n 'town_name': 'TOWN_NAME',\n 'geom': 'MULTIPOINT',\n}\ntowns_shp=os.path.abspath(os.path.join(os.path.dirname(__file__),'data','majortowns.shp'),)\ndef trun(verbose=True):\n lm=LayerMapping(Towns,towns_shp,towns_mapping,transform=False)\n lm.save(strict=True,verbose=verbose)\n","sub_path":"crowns/crowns/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"290521092","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom . import dao\n\nclass SoupUtil:\n\n def getSoupFromURL(url):\n response = requests.get(url)\n time.sleep(1)\n soup = BeautifulSoup(response.content, \"html.parser\")\n return soup\n \nclass DaoUtil:\n \n def scriptMakeToString(obj):\n answer = \"\"\n if obj == None:\n answer = \"NULL\"\n elif type(obj) == str:\n if obj.find(\"'\") > -1:\n answer = '\"' + obj + '\"'\n else:\n answer = \"'\" + obj + \"'\"\n elif type(obj) == int:\n answer = str(obj)\n elif type(obj) == float:\n answer = str(obj)\n elif type(obj) == bool:\n if obj:\n answer = \"true\"\n else:\n answer = \"false\"\n return answer\n \n def insertDataAndGetID(obj1, obj2, connect, tableName):\n if obj1 == None:\n return None\n with dao.Cursor(connect) as cursor:\n id_ = DaoUtil.getID(obj1, cursor, tableName)\n if id_ == False:\n with dao.Cursor(connect) as cursor:\n DaoUtil.insertData(obj1, obj2, cursor, tableName)\n with dao.Cursor(connect) as cursor:\n id_ = DaoUtil.getID(obj1, cursor, tableName)\n return id_\n\n def insertData(obj1, obj2, cursor, tableName):\n script = \"INSERT INTO \" + tableName + \" (name) VALUES (\" + DaoUtil.scriptMakeToString(obj1) + \")\" if obj2 == None else \"INSERT INTO \" + tableName + \" (name, description) VALUES (\" + DaoUtil.scriptMakeToString(obj1) + \", \" + DaoUtil.scriptMakeToString(obj2) + \")\"\n cursor.execute(script)\n \n def getID(obj, cursor, tableName):\n script = \"SELECT id FROM \" + tableName + \" WHERE name = \" + DaoUtil.scriptMakeToString(obj)\n cursor.execute(script)\n result = cursor.fetchone()\n if result == None:\n return False\n else:\n return result[0]\n\nclass SeleniumUtil:\n \n def clickElementByXpath(driver, xpath, waitingTime):\n wait = WebDriverWait(driver, waitingTime)\n try:\n element = wait.until(EC.presence_of_element_located((By.XPATH, xpath)))\n time.sleep(0.1)\n element.click()\n time.sleep(0.1)\n except:\n print(\"[ util.py ] : error occured at clicking element from xpath\")\n return\n \n def clickElementByLinkText(driver, word, waitingTime):\n wait = WebDriverWait(driver, waitingTime)\n try:\n element = wait.until(EC.presence_of_element_located((By.LINK_TEXT, word)))\n time.sleep(0.1)\n element.click()\n time.sleep(0.1)\n except:\n print(\"[ util.py ] : error occured at clicking element from link text\")\n return\n \n def getElementByXpath(driver, xpath, waitingTime):\n wait = WebDriverWait(driver, waitingTime)\n try:\n element = wait.until(EC.presence_of_element_located((By.XPATH, xpath)))\n except:\n element = None\n return element\n \n def getElementsByXpath(driver, xpath, waitingTime):\n wait = WebDriverWait(driver, waitingTime)\n try:\n element = wait.until(EC.presence_of_all_elements_located((By.XPATH, xpath)))\n except:\n element = None\n return element\n ","sub_path":"Class/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"502977767","text":"\"\"\"\nFunctions that implement prediction of and imaging from visibilities using the nifty gridder.\n\nhttps://gitlab.mpcdf.mpg.de/ift/nifty_gridder\n\nThis performs all necessary w term corrections, to high precision.\n\n\"\"\"\n\n__all__ = ['predict_ng', 'invert_ng']\n\nimport logging\nfrom typing import Union\n\nimport numpy\n\nfrom rascil.data_models.memory_data_models import BlockVisibility, \\\n Image\nfrom rascil.data_models.parameters import get_parameter\nfrom rascil.data_models.polarisation import convert_pol_frame\nfrom rascil.processing_components.image.operations import copy_image, \\\n image_is_canonical\nfrom rascil.processing_components.imaging.base import shift_vis_to_image, \\\n normalize_sumwt, fill_vis_for_psf\nfrom rascil.processing_components.visibility.base import copy_visibility\n\n\nlog = logging.getLogger('logger')\n\ntry:\n import nifty_gridder as ng\n \n \n def predict_ng(bvis: BlockVisibility, model: Image, **kwargs) -> BlockVisibility:\n \"\"\" Predict using convolutional degridding.\n \n Nifty-gridder version. https://gitlab.mpcdf.mpg.de/ift/nifty_gridder\n \n In the imaging and pipeline workflows, this may be invoked using context='ng'.\n\n :param bvis: BlockVisibility to be predicted\n :param model: model image\n :return: resulting BlockVisibility (in place works)\n \"\"\"\n \n assert isinstance(bvis, BlockVisibility), bvis\n assert image_is_canonical(model)\n \n if model is None:\n return bvis\n \n nthreads = get_parameter(kwargs, \"threads\", 4)\n epsilon = get_parameter(kwargs, \"epsilon\", 1e-12)\n do_wstacking = get_parameter(kwargs, \"do_wstacking\", True)\n verbosity = get_parameter(kwargs, \"verbosity\", 0)\n \n newbvis = copy_visibility(bvis, zero=True)\n \n # Extracting data from BlockVisibility\n freq = bvis.frequency # frequency, Hz\n nrows, nants, _, vnchan, vnpol = bvis.vis.shape\n \n uvw = newbvis.data['uvw'].reshape([nrows * nants * nants, 3])\n vist = numpy.zeros([vnpol, vnchan, nants * nants * nrows], dtype='complex')\n \n # Get the image properties\n m_nchan, m_npol, ny, nx = model.data.shape\n # Check if the number of frequency channels matches in bvis and a model\n # assert (m_nchan == v_nchan)\n assert (m_npol == vnpol)\n \n fuvw = uvw.copy()\n # We need to flip the u and w axes. 
The flip in w is equivalent to the conjugation of the\n # convolution function grid_visibility to griddata\n fuvw[:, 0] *= -1.0\n fuvw[:, 2] *= -1.0\n \n # Find out the image size/resolution\n pixsize = numpy.abs(numpy.radians(model.wcs.wcs.cdelt[0]))\n \n # Make de-gridding over a frequency range and pol fields\n vis_to_im = numpy.round(model.wcs.sub([4]).wcs_world2pix(freq, 0)[0]).astype('int')\n \n mfs = m_nchan == 1\n\n if mfs:\n for vpol in range(vnpol):\n vist[vpol, : , :] = ng.dirty2ms(fuvw.astype(numpy.float64),\n bvis.frequency.astype(numpy.float64),\n model.data[0, vpol, :, :].T.astype(numpy.float64),\n pixsize_x=pixsize,\n pixsize_y=pixsize,\n epsilon=epsilon,\n do_wstacking=do_wstacking,\n nthreads=nthreads,\n verbosity=verbosity).T\n\n else:\n for vpol in range(vnpol):\n for vchan in range(vnchan):\n imchan = vis_to_im[vchan]\n vist[vpol, vchan, :] = ng.dirty2ms(fuvw.astype(numpy.float64),\n numpy.array(freq[vchan:vchan + 1]).astype(numpy.float64),\n model.data[imchan, vpol, :, :].T.astype(numpy.float64),\n pixsize_x=pixsize,\n pixsize_y=pixsize,\n epsilon=epsilon,\n do_wstacking=do_wstacking,\n nthreads=nthreads,\n verbosity=verbosity)[:, 0]\n \n vis = convert_pol_frame(vist.T, model.polarisation_frame, bvis.polarisation_frame, polaxis=2)\n\n newbvis.data['vis'] = vis.reshape([nrows, nants, nants, vnchan, vnpol])\n \n # Now we can shift the visibility from the image frame to the original visibility frame\n return shift_vis_to_image(newbvis, model, tangent=True, inverse=True)\n\n \n def invert_ng(bvis: BlockVisibility, model: Image, dopsf: bool = False,\n normalize: bool = True,\n **kwargs) -> (Image, numpy.ndarray):\n \"\"\" Invert using nifty-gridder module\n \n https://gitlab.mpcdf.mpg.de/ift/nifty_gridder\n \n Use the image im as a template. Do PSF in a separate call.\n\n In the imaging and pipeline workflows, this may be invoked using context='ng'.\n\n :param dopsf: Make the PSF instead of the dirty image\n :param bvis: BlockVisibility to be inverted\n :param im: image template (not changed)\n :param normalize: Normalize by the sum of weights (True)\n :return: (resulting image, sum of the weights for each frequency and polarization)\n \n \"\"\"\n assert image_is_canonical(model)\n \n assert isinstance(bvis, BlockVisibility), bvis\n \n im = copy_image(model)\n \n nthreads = get_parameter(kwargs, \"threads\", 4)\n epsilon = get_parameter(kwargs, \"epsilon\", 1e-12)\n do_wstacking = get_parameter(kwargs, \"do_wstacking\", True)\n verbosity = get_parameter(kwargs, \"verbosity\", 0)\n \n sbvis = copy_visibility(bvis)\n sbvis = shift_vis_to_image(sbvis, im, tangent=True, inverse=False)\n \n freq = sbvis.frequency # frequency, Hz\n \n nrows, nants, _, vnchan, vnpol = sbvis.vis.shape\n # if dopsf:\n # sbvis = fill_vis_for_psf(sbvis)\n\n ms = sbvis.vis.reshape([nrows * nants * nants, vnchan, vnpol])\n ms = convert_pol_frame(ms, bvis.polarisation_frame, im.polarisation_frame, polaxis=2)\n\n uvw = sbvis.uvw.reshape([nrows * nants * nants, 3])\n wgt = sbvis.flagged_imaging_weight.reshape([nrows * nants * nants, vnchan, vnpol])\n\n if epsilon > 5.0e-6:\n ms = ms.astype(\"c8\")\n wgt = wgt.astype(\"f4\")\n \n # Find out the image size/resolution\n npixdirty = im.nwidth\n pixsize = numpy.abs(numpy.radians(im.wcs.wcs.cdelt[0]))\n \n fuvw = uvw.copy()\n # We need to flip the u and w axes.\n fuvw[:, 0] *= -1.0\n fuvw[:, 2] *= -1.0\n \n nchan, npol, ny, nx = im.shape\n im.data[...] 
= 0.0\n sumwt = numpy.zeros([nchan, npol])\n \n # There's a latent problem here with the weights.\n # wgt = numpy.real(convert_pol_frame(wgt, bvis.polarisation_frame, im.polarisation_frame, polaxis=2))\n \n # Set up the conversion from visibility channels to image channels\n vis_to_im = numpy.round(model.wcs.sub([4]).wcs_world2pix(freq, 0)[0]).astype('int')\n \n # Nifty gridder likes to receive contiguous arrays so we transpose\n # at the beginning\n \n mfs = nchan == 1\n if dopsf:\n \n mst = ms.T\n mst[...] = 0.0\n mst[0, ...] = 1.0\n wgtt = wgt.T\n\n if mfs:\n dirty = ng.ms2dirty(fuvw.astype(numpy.float64),\n bvis.frequency.astype(numpy.float64),\n numpy.ascontiguousarray(mst[0, :, :].T),\n numpy.ascontiguousarray(wgtt[0, :, :].T),\n npixdirty, npixdirty, pixsize, pixsize, epsilon,\n do_wstacking=do_wstacking,\n nthreads=nthreads, verbosity=verbosity)\n sumwt[0, :] += numpy.sum(wgtt[0, 0, :].T, axis=0)\n im.data[0, :] += dirty.T\n else:\n for vchan in range(vnchan):\n ichan = vis_to_im[vchan]\n frequency = numpy.array(freq[vchan:vchan + 1]).astype(numpy.float64)\n dirty = ng.ms2dirty(fuvw.astype(numpy.float64),\n frequency.astype(numpy.float64),\n numpy.ascontiguousarray(mst[0, vchan, :][..., numpy.newaxis]),\n numpy.ascontiguousarray(wgtt[0, vchan, :][..., numpy.newaxis]),\n npixdirty, npixdirty, pixsize, pixsize, epsilon,\n do_wstacking=do_wstacking,\n nthreads=nthreads, verbosity=verbosity)\n sumwt[ichan, :] += numpy.sum(wgtt[0, ichan, :].T, axis=0)\n im.data[ichan, :] += dirty.T\n else:\n mst = ms.T\n wgtt = wgt.T\n for pol in range(npol):\n if mfs:\n dirty = ng.ms2dirty(fuvw.astype(numpy.float64),\n bvis.frequency.astype(numpy.float64),\n numpy.ascontiguousarray(mst[pol, :, :].T),\n numpy.ascontiguousarray(wgtt[pol, :, :].T),\n npixdirty, npixdirty, pixsize, pixsize, epsilon,\n do_wstacking=do_wstacking,\n nthreads=nthreads, verbosity=verbosity)\n sumwt[0, pol] += numpy.sum(wgtt[pol, 0, :].T, axis=0)\n im.data[0, pol] += dirty.T\n else:\n for vchan in range(vnchan):\n ichan = vis_to_im[vchan]\n frequency = numpy.array(freq[vchan:vchan + 1]).astype(numpy.float64)\n dirty = ng.ms2dirty(fuvw.astype(numpy.float64),\n frequency.astype(numpy.float64),\n numpy.ascontiguousarray(mst[pol, vchan, :][..., numpy.newaxis]),\n numpy.ascontiguousarray(wgtt[pol, vchan, :][..., numpy.newaxis]),\n npixdirty, npixdirty, pixsize, pixsize, epsilon,\n do_wstacking=do_wstacking,\n nthreads=nthreads, verbosity=verbosity)\n sumwt[ichan, pol] += numpy.sum(wgtt[pol, ichan, :].T, axis=0)\n im.data[ichan, pol] += dirty.T\n\n \n if normalize:\n im = normalize_sumwt(im, sumwt)\n \n return im, sumwt\n\nexcept ImportError:\n import warnings\n \n warnings.warn('Cannot import nifty_gridder, ng disabled', ImportWarning)\n \n \n def predict_ng(bvis: BlockVisibility, model: Image, **kwargs) -> BlockVisibility:\n \"\"\" Predict using convolutional degridding.\n\n Nifty-gridder version. https://gitlab.mpcdf.mpg.de/ift/nifty_gridder\n\n In the imaging and pipeline workflows, this may be invoked using context='ng'.\n\n :param bvis: BlockVisibility to be predicted\n :param model: model image\n :return: resulting BlockVisibility (in place works)\n \"\"\"\n\n log.error(\"Nifty gridder not available\")\n return bvis\n \n \n def invert_ng(bvis: BlockVisibility, model: Image, dopsf: bool = False,\n normalize: bool = True,\n **kwargs) -> (Image, numpy.ndarray):\n \"\"\" Invert using nifty-gridder module\n\n https://gitlab.mpcdf.mpg.de/ift/nifty_gridder\n\n Use the image im as a template. 
Do PSF in a separate call.\n\n Any shifting needed is performed here.\n\n In the imaging and pipeline workflows, this may be invoked using context='ng'.\n\n :param bvis: BlockVisibility to be inverted\n :param im: image template (not changed)\n :param normalize: Normalize by the sum of weights (True)\n :return: (resulting image, sum of the weights for each frequency and polarization)\n\n \"\"\"\n log.error(\"Nifty gridder not available\")\n return model, None\n","sub_path":"rascil/processing_components/imaging/ng.py","file_name":"ng.py","file_ext":"py","file_size_in_byte":12824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"218023543","text":"import stopaj\n\n\ndef sled_matrike1(matrika):\n sled = 0\n for i, vrstica in enumerate(matrika):\n for j, element in enumerate(vrstica):\n if i == j:\n sled += element\n return sled\n\n\ndef sled_matrike2(matrika):\n sled = 0\n for i, vrstica in enumerate(matrika):\n sled += vrstica[i]\n return sled\n\n\ndef sled_matrike3(matrika):\n sled = 0\n for i in range(len(matrika)):\n sled += matrika[i][i]\n return sled\n\n\nstopaj.izmeri_case_poskusov(\n [stopaj.nakljucna_matrika(50 * n) for n in range(1, 20)],\n [sled_matrike1, sled_matrike2, sled_matrike3],\n)\n","sub_path":"odlozisce/datoteke-s-predavanj/13-racunska-zahtevnost/sled.py","file_name":"sled.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"19170751","text":"import sys\n\nsys.stdin = open(\"input.txt\", \"r\")\n\nn = int(input())\na = []\nfor _ in range(n):\n a.append(int(input()))\n\nleft = 1\nright = 1\n\nleft_max = a[0]\nright_max = a[-1]\n\nfor i in range(1, n):\n if a[i-1] < a[i] and left_max < a[i]:\n left_max = a[i]\n left += 1\n\nfor i in range(0, n-1):\n if a[n-i-1] < a[n-i-2] and right_max < a[n-i-2]:\n right_max = a[n-i-2]\n right += 1\n\nprint(left)\nprint(right)\n\n","sub_path":"1668.py","file_name":"1668.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"78410269","text":"from string import Template\n\n\nMAINTEMPLATE = \"\"\"MIRRORSITE=$mirrorsite\nDISTRIBUTION=$dist\nDEBOOTSTRAP=$debootstrap\nDEBOOTSTRAPOPTS=('--variant=buildd' '--keyring=/etc/apt/trusted.gpg')\nAPTCACHE=\"\"\nDEBBUILDOPTS=$debbuild_opts\nPBUILDERSATISFYDEPENDSCMD=/usr/lib/pbuilder/pbuilder-satisfydepends\nOTHERMIRROR=\"$othermirrors\"\nEXTRAPACKAGES=\"\"\nHOOKDIR=/etc/pbuilder/hooks\n# exported variables go below this line\nexport CFLAGS=\"$cflags\"\nexport DEB_BUILD_OPTIONS=\"$deb_build_options\"\n\"\"\"\n\n \n \nclass PbuilderConfigManager(object):\n def __init__(self):\n self.template = Template(MAINTEMPLATE)\n self.mirrorsite = 'http://cypress.forest/debrepos/debian'\n self.dist = 'squeeze'\n self.debootstrap = 'cdebootstrap'\n self.debbuild_opts = ''\n self.other_mirrors = []\n self.cflags = '-pipe'\n self.deb_build_options = 'parallel=3'\n\n\n def make_mirrorsite_entries(self, host, dist):\n self.other_mirrors = []\n self.mirrorsite = 'http://%s/debrepos/debian' % host\n baseurl = 'http://%s/debrepos/paella' % host\n dists = [dist, '%s-backports' % dist]\n components = 'main contrib non-free'\n for d in dists:\n omirror = 'deb %s %s %s' % (baseurl, d, components)\n self.other_mirrors.append(omirror)\n \n def make_data(self):\n keys = ['mirrorsite', 'dist', 'debootstrap',\n 'debbuild_opts', 'cflags', 'deb_build_options']\n data = dict()\n for key in keys:\n data[key] = getattr(self, key)\n data['othermirrors'] = '|'.join(self.other_mirrors)\n return data\n\n def substitute(self):\n data = self.make_data()\n return self.template.substitute(data)\n \n \n \n \n\n\n\n \n \nif __name__ == '__main__':\n bp = BullProdder()\n\n\n \n \n \n \n \nif __name__ == '__main__':\n bp = BullProdder()\n\n\n \n \n \n \n","sub_path":"debrepos/pbuilderrc.py","file_name":"pbuilderrc.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"137734195","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom scrapy.http import Request\n\n\nclass PctidningenSpider(scrapy.Spider):\n name = 'pctidningen'\n allowed_domains = ['test.pctidningen.se']\n start_urls = ['https://test.pctidningen.se/cms/wp-admin/admin-ajax.php'\n '?action=product-search&price_from=0&price_to=0&orderby=' \\\n 'review_date&order=desc&page=%d' % n for n in range(1, 92)]\n\n def parse(self, response):\n review_data = json.loads(response.text)\n data = review_data['data']['rows']\n urls = []\n for row in data:\n urls.append(row['url'])\n\n for url in urls:\n yield Request(url=url, callback=self.parse_p)\n\n def parse_p(self, response):\n title_xpath = '//h1[@class=\"h1\"]//text()'\n title = response.xpath(title_xpath).get()\n\n date_xpath = '//time/@datetime'\n date = response.xpath(date_xpath).get()\n\n rating_xpath = '//span[@itemprop=\"ratingValue\"]//text()'\n rating = response.xpath(rating_xpath).get()\n\n author_xpath = '//div[@class=\"meta-information\"]/span//text()'\n author = response.xpath(author_xpath).get()\n\n summary_xpath = '//meta[@property=\"og:description\"]/@content'\n summary = response.xpath(summary_xpath).get()\n\n ocn_xpath = '//span[@class=\"tags\"]/a//text()'\n ocn = response.xpath(ocn_xpath).get()\n ocn = ocn[1:]\n\n pros_xpath = '//font[@color=\"green\"]/following-sibling::text()[1] |' \\\n ' //span[@style=\"color: green;\"]/following-sibling::text()[1]'\n pros = response.xpath(pros_xpath).get()\n\n cons_xpath = '//font[@color=\"red\"]/following-sibling::text()[1] |' \\\n ' //span[@style=\"color: red;\"]/following-sibling::text()[1]'\n cons = response.xpath(cons_xpath).get()\n\n verdict_xpath = '//div[@itemprop=\"description\"]/p[last()-1]/text()'\n verdict = response.xpath(verdict_xpath).get()\n\n url = response.url\n\n yield {'Title': title,\n 'Date': date,\n 'Rating': rating,\n 'Author': author,\n 'Summary': summary,\n 'OCN': ocn,\n 'Pros': pros,\n 'Cons': cons,\n 'Verdict': verdict,\n 'Url': url\n }","sub_path":"point/point/spiders/pctidningen.py","file_name":"pctidningen.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"365827875","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 1 20:59:43 2017\n\n@author: ankur\n\"\"\"\n\n#functions and structures for binary thresholding\n\nfrom collections import namedtuple\nimport numpy as np\nimport cv2\nimport utilities as cvUtils\n\nThresholdRange = namedtuple('ThresholdRange', ['min', 'max', 'dTh'])\n\n#RdxThresh = Threshold(150, 250, 5)\n\n#Adaptive Binary Threshold.\n#We use R channel from RGB and V from HSV.\n#We iteratively find threshold parameters that give just enough pixels.\n## Input:\n #ROI image:3 channel\n #Initial values for R and V\n #Bailout: max iterations\n## Output:\n #Binary image: 3 channel\n #RthreshValue\n #VthreshValue\n# input ROI image is 3 channel\n# returns a 3channel Binary image (0 and 255)\ndef binary_threshold(roi, config):\n \n R_Range = config['R_Range']\n V_Range = config['V_Range']\n R_Thresh = config['R_best']\n V_Thresh = config['V_best']\n bailout = config['bailout']\n minLane = config['minLane']\n maxLane = config['maxLane']\n \n thresh_img = np.zeros_like(roi[:, :, 0])\n \n total_pixels = thresh_img.shape[0]*thresh_img.shape[1]\n \n #Using R and G channels to mask out dark or grey road/ shadow values\n (B,G,R) = cv2.split(roi)\n \n R_dx = np.absolute(cv2.Sobel(R, cv2.CV_64F, 1, 0))\n R_dx *= 255/np.max(R_dx)\n \n hsv = cvUtils.colorspace(roi, cv2.COLOR_BGR2HSV)\n V = cvUtils.get_channel(hsv, 2)\n \n #Create an initial binary image. We will refine this if we don't have enough pixels or too many pixels\n thresh_img[(V >= V_Thresh) | (R >= R_Thresh)] = 255\n \n #Count of Non-Zero mask pixels\n nzcount = np.count_nonzero(thresh_img)\n \n #Bailout Counter. If we can not reach a good value in n steps, stop wasting time and move on.\n counter = 0\n \n wiggleScope = True\n \n #We want the number of lane pixels to be within 1% to 3% of the total pixels in image\n minarea = minLane * total_pixels\n maxarea = maxLane * total_pixels\n \n success = True\n ddth = 0\n while ((nzcount < minarea) | (nzcount >= maxarea)) & (wiggleScope):\n \n print(nzcount/total_pixels, counter, R_Thresh, V_Thresh)\n counter += 1\n if (counter == int(bailout/2)):\n ddth = 3\n if (counter == bailout):\n print(\"Unable to find a good value in {} steps. Bailing out!\".format(bailout))\n success = False\n break\n \n #If there is still scope in moving ranges\n VwiggleScope = (V_Thresh >= V_Range.min) and (V_Thresh <= V_Range.max)\n RwiggleScope = (R_Thresh >= R_Range.min) and (R_Thresh <= R_Range.max)\n wiggleScope = VwiggleScope or RwiggleScope\n \n if wiggleScope:\n if (nzcount < minarea):\n if VwiggleScope:\n V_Thresh -= (V_Range.dTh - ddth)\n if RwiggleScope:\n R_Thresh -= (R_Range.dTh - ddth)\n else:\n if VwiggleScope:\n V_Thresh += (V_Range.dTh - ddth)\n if RwiggleScope:\n R_Thresh += (R_Range.dTh - ddth)\n \n thresh_img = np.zeros_like(thresh_img)\n thresh_img[(V >= V_Thresh) | (R >= R_Thresh)] = 255\n nzcount = np.count_nonzero(thresh_img)\n \n else:\n print(\"Unable to find a good value in range. 
Bailing out!\")\n            success = False\n            break\n\n    print(\"{:.2f} %cnt, {} steps, {} nzcnt\".format(nzcount / total_pixels, counter, nzcount))\n\n    if not success:\n        # on failure, return a blank mask and restore the initial thresholds\n        thresh_img = np.zeros_like(thresh_img)\n        config['R_best'] = config['R_init']\n        config['V_best'] = config['V_init']\n    else:\n        config['R_best'] = R_Thresh\n        config['V_best'] = V_Thresh\n    bin_img = np.dstack((thresh_img, thresh_img, thresh_img))\n    return success, bin_img, config\n","sub_path":"BinaryThreshold.py","file_name":"BinaryThreshold.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
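binary_threshold above expects a config dict whose keys are only implied by the code; a plausible setup using the record's ThresholdRange namedtuple, with every numeric value being an assumption rather than a tuned default:

    config = {
        'R_Range': ThresholdRange(min=120, max=240, dTh=5),   # red-channel search window (assumed)
        'V_Range': ThresholdRange(min=120, max=240, dTh=5),   # HSV value-channel search window (assumed)
        'R_best': 180, 'V_best': 180,      # current thresholds, updated in place
        'R_init': 180, 'V_init': 180,      # fallbacks restored when the search fails
        'bailout': 20,                     # max refinement iterations
        'minLane': 0.01, 'maxLane': 0.03,  # target lane-pixel fraction of the image
    }
    success, bin_img, config = binary_threshold(roi, config)  # roi: 3-channel BGR region of interest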
+{"seq_id":"463133698","text":"import re\nimport os\nimport csv\nimport urllib\nfrom collections import namedtuple\nfrom datetime import datetime\nimport logging\n\nimport numpy as np\n\nfrom PyQt4 import QtGui, QtCore\n\nfrom Orange.widgets import widget, gui, settings\nfrom Orange.data import Table, Domain, DiscreteVariable, ContinuousVariable, StringVariable\n\n\nlog = logging.getLogger()\n\n\nSHEETS_PATTERN = re.compile(\n r'(?:https?://)?(?:www\\.)?'\n 'docs\\.google\\.com/spreadsheets/d/'\n '(?P[-\\w_]+)'\n '(?:/.*?gid=(?P\\d+).*|.*)?',\n re.IGNORECASE\n)\n\n\ndef SHEETS_URL(url):\n match = SHEETS_PATTERN.match(url)\n workbook, sheet = match.group('workbook_id'), match.group('sheet_id')\n if not workbook: raise ValueError\n url = 'https://docs.google.com/spreadsheets/d/{}/export?format=tsv'.format(workbook)\n if sheet: url += '&gid=' + sheet\n return url\n\n\nSheet = namedtuple('Sheet', ('name', 'url'))\n\n\n# FIXME: This belongs into Table.from_url!!\n#\n#\n#\n#\n# Do not use this, see https://github.com/biolab/orange3/pull/678 instead.\n#\n#\n#\n#\ndef from_url(url):\n name = urllib.parse.urlparse(url)[2].replace('/', '_')\n\n def suggested_filename(content_disposition):\n # See https://tools.ietf.org/html/rfc6266#section-4.1\n matches = re.findall(r\"filename\\*?=(?:\\\"|.{0,10}?'[^']*')([^\\\"]+)\",\n content_disposition or '')\n return urllib.parse.unquote(matches[-1]) if matches else ''\n\n def get_encoding(content_disposition):\n matches = re.findall(r\"filename\\*=(.{0,10}?)'[^']*'\",\n content_disposition or '')\n return matches[0].lower() if matches else 'utf-8'\n\n with urllib.request.urlopen(url, timeout=10) as response:\n name = suggested_filename(response.headers['content-disposition']) or name\n\n encoding = get_encoding(response.headers['content-disposition'])\n text = [row.decode(encoding) for row in response]\n csv_reader = csv.reader(text, delimiter='\\t')\n header = next(csv_reader)\n data = np.array(list(csv_reader))\n\n attrs = []\n metas = []\n attrs_cols = []\n metas_cols = []\n for col in range(data.shape[1]):\n values = [val for val in data[:, col] if val not in ('', '?', 'nan')]\n try: floats = [float(i) for i in values]\n except ValueError:\n # Not numbers\n values = set(values)\n if len(values) < 12:\n attrs.append(DiscreteVariable(header[col], values=sorted(values)))\n attrs_cols.append(col)\n else:\n metas.append(StringVariable(header[col]))\n metas_cols.append(col)\n else:\n attrs.append(ContinuousVariable(header[col]))\n attrs_cols.append(col)\n\n domain = Domain(attrs, metas=metas)\n data = np.hstack((data[:, attrs_cols], data[:, metas_cols]))\n table = Table.from_list(domain, data.tolist())\n\n table.name = os.path.splitext(name)[0]\n return table\n\n\nclass OWGoogleSheets(widget.OWWidget):\n name = \"Google Sheets\"\n description = \"Read data from a Google Sheets spreadsheet.\"\n icon = \"icons/GoogleSheets.svg\"\n priority = 20\n outputs = [(\"Data\", Table)]\n\n want_main_area = False\n\n recent = settings.Setting([])\n autocommit = settings.Setting(True)\n\n def __init__(self):\n super().__init__()\n hb = gui.widgetBox(self.controlArea, 'Google Sheets', orientation='horizontal')\n self.combo = combo = QtGui.QComboBox(hb)\n combo.setEditable(True)\n combo.setMinimumWidth(300)\n hb.layout().addWidget(QtGui.QLabel('URL:', hb))\n hb.layout().addWidget(combo)\n hb.layout().setStretch(1, 2)\n box = gui.widgetBox(self.controlArea, \"Info\", addSpace=True)\n info = self.data_info = gui.widgetLabel(box, '')\n info.setWordWrap(True)\n 
self.controlArea.layout().addStretch(1)\n gui.auto_commit(self.controlArea, self, 'autocommit', label='Commit')\n\n self.set_combo_items()\n self.table = None\n self.set_info()\n self.timer = QtCore.QTimer(self)\n combo.editTextChanged.connect(self.on_combo_textchanged)\n combo.currentIndexChanged.connect(self.on_combo_activated)\n combo.currentIndexChanged.emit(0)\n\n def set_combo_items(self):\n self.combo.clear()\n for sheet in self.recent:\n self.combo.addItem(sheet.name, sheet.url)\n\n def commit(self):\n self.send('Data', self.table)\n\n def on_combo_textchanged(self, text):\n self.timer.stop()\n try: url = SHEETS_URL(text)\n except (ValueError, AttributeError):\n self.error('Unrecognized URL; should be \"docs.google.com/spreadsheets/d/\"')\n return\n self.error()\n self.timer = QtCore.QTimer(self)\n self.timer.setSingleShot(True)\n self.timer.timeout.connect(lambda: self.on_combo_activated(url=url))\n self.timer.start(500)\n\n def on_combo_activated(self, index=float('inf'), url=''):\n self.error()\n if 0 <= index < len(self.recent):\n sheet = self.recent.pop(index)\n self.table = self.retrieve(sheet.url)\n self.recent.insert(0, sheet)\n elif url:\n table = self.table = self.retrieve(url)\n if not table: return\n sheet = Sheet(table.name, url)\n self.recent = [s for s in self.recent if s.url != url]\n self.recent.insert(0, sheet)\n else: return\n self.set_info()\n self.commit()\n\n self.combo.editTextChanged.disconnect(self.on_combo_textchanged)\n self.combo.currentIndexChanged.disconnect(self.on_combo_activated)\n self.set_combo_items()\n self.combo.editTextChanged.connect(self.on_combo_textchanged)\n self.combo.currentIndexChanged.connect(self.on_combo_activated)\n\n def set_info(self):\n data = self.table\n if not data:\n self.data_info.setText('No spreadsheet loaded.')\n return\n text = \"{} instance(s), {} feature(s), {} meta attribute(s)\".format(\n len(data), len(data.domain.attributes), len(data.domain.metas))\n try: text += '\\nFirst entry: {}\\nLast entry: {}'.format(data[0, 'Timestamp'],\n data[-1, 'Timestamp'])\n except Exception: pass # no Timestamp header\n self.data_info.setText(text)\n\n def retrieve(self, url):\n if not url: return\n progress = gui.ProgressBar(self, 10)\n for i in range(3): progress.advance()\n try: table = from_url(url)\n except Exception as e:\n import traceback\n log.error(traceback.format_exc())\n log.error(\"Couldn't load spreadsheet %s: %s\", url, e)\n self.error(\"Couldn't load spreadsheet. Ensure correct read permissions; rectangle, top-left aligned sheet data ...\")\n return\n else:\n for i in range(7): progress.advance()\n finally:\n progress.finish()\n return table\n\n\nif __name__ == \"__main__\":\n a = QtGui.QApplication([])\n ow = OWGoogleSheets()\n ow.show()\n a.exec_()\n ow.saveSettings()\n","sub_path":"lib/python3.5/site-packages/orangecontrib/prototypes/widgets/owgooglesheets.py","file_name":"owgooglesheets.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"610536071","text":"\"\"\"\nBag-of-words without N-gram\nusing tf-idf\n\"\"\"\nimport data_helper\nfrom textblob import TextBlob\nimport os\nimport math\n\nfile = \"./20news-18828\"\nfile_frequency = \"./data_statistic_every\"\nfile_tf_idf = \"./data_tf_idf\"\nfrequency = 50\n\ndef build_dic(path):\n\t\"\"\"\n\t:param path:\n\t:return:\n\t建立词典,存储到文件中\n\t\"\"\"\n\tdic_origin = data_helper.data_load(path)\n\tdic_split = data_helper.data_split(dic_origin)\n\tdic_re = data_helper.data_recover(dic_split, frequency)\n\twith open(\"./data/dict_\" + str(frequency) + \".txt\", \"w+\", encoding=\"utf-8\") as output:\n\t\tfor i in dic_re:\n\t\t\toutput.write(str(i) + \"\\n\")\n\n# build_dic(file)\n\ndef load_dic():\n\t\"\"\"\n\t:return:从文件加载词典\n\t\"\"\"\n\twith open(\"./data/dict_50.txt\", \"r\", encoding=\"utf-8\") as load_dic:\n\t\tdic_word = dict()\n\t\tload_dic_now = load_dic.readlines()\n\t\tfor index in range(len(load_dic_now)):\n\t\t\tdic_word[load_dic_now[index].replace(\"\\n\", \"\")] = index\n\n\t\treturn dic_word\n\n\nif __name__ == \"__main__\":\n\n\t# data_origin = data_helper.data_load(file)\n\t# for key in data_origin.keys():\n\t# \tfor i in data_origin[key]:\n\t# \t\tdata_sp = TextBlob(str(i).strip().replace(\"\\\\n\", \"\"))\n\t# \t\twith open(\"./data_split.txt\", \"a\", encoding=\"utf-8\") as output:\n\t# \t\t\toutput.write(str(data_sp.words) + \"\\n\")\n\n\t\"\"\"\n\t读取词典和总词频\n\t\"\"\"\n\tdic = load_dic()\n\tindex = 0\n\t# print(dic.keys())\n\tdic_frequency = dict()\n\tdic_tf_idf = dict()\n\twith open(\"./data/word_frequency_all.txt\", \"r\", encoding=\"utf-8\") as frequency:\n\t\tfrequency = frequency.readlines()\n\t\tfor i in frequency:\n\t\t\tj = i.split(\":\")\n\t\t\tdic_frequency[j[0]] = int(j[1])\n\n\t\"\"\"\n\ttf和idf的分别计算,由于存储的问题,需要读取整个文件夹\n\t\"\"\"\n\tfiles = os.listdir(file_frequency)\n\tfor file in files:\n\t\tfile_list = os.listdir(file_frequency + \"/\" + file)\n\t\tos.mkdir(file_tf_idf + \"/\" + file)\n\t\tfor file_text in file_list:\n\t\t\twith open(file_frequency + \"/\" + file + \"/\" + file_text, \"r\", encoding=\"utf-8\") as frequency_every:\n\t\t\t\tlength = 0\n\t\t\t\tfrequency_every = frequency_every.readlines()\n\t\t\t\tfor i in frequency_every:\n\t\t\t\t\tif len(i) < 2:continue\n\t\t\t\t\tj = i.split(\":\")\n\t\t\t\t\tlength += int(j[1])\n\t\t\t\t# print(length)\n\t\t\t\tprint(file_frequency + \"/\" + file + \"/\" + file_text)\n\t\t\t\tfor i in frequency_every:\n\t\t\t\t\tif len(i) < 2: continue\n\t\t\t\t\tj = i.split(\":\")\n\t\t\t\t\ttf = float(j[1])/length\n\t\t\t\t\tidf = math.log(18829/(dic_frequency[j[0]]+1))\n\t\t\t\t\tdic_tf_idf[j[0]] = tf*idf\n\t\t\twith open(file_tf_idf + \"/\" + file + \"/\" + file_text, \"a\", encoding=\"utf-8\") as output:\n\t\t\t\tfor key in dic_tf_idf.keys():\n\t\t\t\t\toutput.write(key + \":\" + str(dic_tf_idf[key])+ \"\\n\")","sub_path":"KNN/Vector_space_model.py","file_name":"Vector_space_model.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"305583118","text":"#!/usr/bin/python3\n\n# Copyright (c) 2018 2BRobots\n# Author: dannimakes\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom tkinter import *\n\nclass virtualLCD(object):\n\n def __init__(self,cols,rows):\n self.root = Tk()\n self.root.title(\"virtualLCD\")\n self.root.geometry(\"+500+100\")\n self.buffer = \"\"\n self.buffer = self.buffer.ljust(cols*rows)\n self.labelfont = ('Courier', 24, 'bold')\n self.widget = Label(self.root, text=self.buffer, height=rows, width=cols, anchor=NW, justify=LEFT)\n self.widget.config(bg='yellow', fg='black') #here you can change the background and text color of the LCD \n self.widget.config(font=self.labelfont) \n self.widget.pack(expand=YES, fill=BOTH)\n self.root.update()\n self.cols = cols\n self.rows = rows\n self.clear()\n \n def clear(self):\n self.set_cursor(0,0)\n self.buffer = \"\"\n self.buffer = self.buffer.ljust(self.cols*self.rows)\n self.widget.config(text=self.buffer)\n self.widget.pack(expand=YES, fill=BOTH)\n self.root.update()\n \n def message(self,data):\n data = data.replace('\\n','')\n self.buffer = self.buffer.replace('\\n','')\n data = self.buffer[:self.cursor] + data + self.buffer[(self.cursor+len(data)):]\n data = data[:(self.cols*self.rows)]\n data = '\\n'.join([data[i:i + self.cols] for i in range(0, len(data), self.cols)])\n self.widget.config(text=data)\n self.buffer = data\n self.root.update()\n\n def set_cursor(self,x,y):\n self.cursor = ((y*self.cols)+x)\n\n def read(self):\n return self.buffer\n \n","sub_path":"virtualLCD/virtualLCD.py","file_name":"virtualLCD.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"505693825","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n# @Time : 2018/5/3 17:16\n\n\"\"\"\n粗略实现类似于文件的类.通过这些类能将输入输出重定向到图形界面上显示;输入来自于\n通用的弹出式对话框(一个能将输入输出融合起来的界面,或者一个持续的,用于输入的数据\n录入字段更好);对于字节数大于len(line)的读取请求,分行会出现问题;也可向GuiInput\n里增加__iter__或者__next__以像文件一样支持行迭代,但会出现过多的弹窗;\n\"\"\"\n\nfrom tkinter import *\nfrom tkinter.simpledialog import askstring\nfrom tkinter.scrolledtext import ScrolledText\n\nclass GuiOutput:\n font = ('courier',9,'normal') # 在类里,适用于整体,而self只适用于个体\n def __init__(self,parent=None):\n self.text = None\n if parent: self.popupnow(parent) # 先弹出或第一次写入parent窗口\n\n def popupnow(self,parent=None): # 然后再到顶层窗口\n if self.text: return\n self.text = ScrolledText(parent or Toplevel())\n self.text.config(font=self.font)\n self.text.pack()\n def write(self,text):\n self.popupnow()\n self.text.insert(END,str(text))\n self.text.see(END)\n #self.text.update() # 每行结束后更新界面\n\n def writeline(self,lines): # 有'\\n'的行\n for line in lines: self.write(line) # 或者使用map(self.write,line)\n\nclass GuiInput:\n def __init__(self):\n self.buff = ''\n\n def inputLine(self):\n line = askstring('GuiInput','Enter input line + (cancel=eof)')\n if line == None:\n return '' # 针对各行弹出对话框\n else: # 取消按钮表示文件末尾\n return line + '\\n' # 否则,添加行结束的标记\n\n def read(self,bytes=None):\n if not self.buff:\n self.buff = self.inputLine()\n if bytes: # 按照字节数读入\n text = self.buff[:bytes] # 不分行\n self.buff = self.buff[bytes:]\n else:\n text = '' # 持续读入,直到行末\n line = self.buff\n while line:\n text = text + line\n line = self.inputLine() # 直到cancel eof或者''\n return text\n\n def readline(self):\n text = self.buff or self.inputLine() # 枚举文件读取方法\n self.buff = ''\n return text\n\n def readlines(self):\n lines = [] # 读入所有的行\n while True:\n next = self.readline()\n if not next: break\n lines.append(next)\n return lines\n\ndef redirectedGuiFunc(func,*pargs,**kargs):\n import sys\n saveStreams = sys.stdin,sys.stdout # 将函数中的流映射输入到弹出的窗口中\n sys.stdin = GuiInput() # 根据需要弹出对话框\n sys.stdout = GuiOutput() # 响应调用,创建新的输出窗口\n sys.stderr = sys.stdout\n\n result = func(*pargs,**kargs) # 这里阻塞调用\n sys.stdin,sys.stdout = saveStreams\n return result\n\ndef redirectedGuiShellCmd(command):\n import os\n input = os.popen(command,'r')\n output = GuiOutput()\n def reader(input,output): # 显示一个shell命令的\n while True: # 标准输出\n line = input.readline() # 在新的弹出式文件框组件中\n if not line: break # 调用readline时可能阻塞\n output.write(line)\n reader(input,output)\n\nif __name__ == '__main__': # 运行时自测\n def makeUpper(): # 使用标准流\n while True:\n try:\n line = input('Line? ')\n except:\n break\n print(line.upper())\n print('end of file')\n\n def makeLower(input,output): # 使用显式文件\n while True:\n line = input.readline()\n if not line: break\n output.write(line.lower())\n print('end of file')\n\n root = Tk()\n Button(root,text='test streams',\n command=lambda: redirectedGuiFunc(makeUpper)).pack(fill=X)\n Button(root,text='test files',\n command=lambda: makeLower(GuiInput(),GuiOutput())).pack(fill=X)\n Button(root,text='test.popen',\n command=lambda: redirectedGuiShellCmd('dir *')).pack(fill=X)\n root.mainloop()","sub_path":"guiStreams.py","file_name":"guiStreams.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"129705820","text":"# -*- coding:utf-8 -*\nimport random\n# import fmovice\n# import urllib\n# import time\n# s=time.time()\n# print(type(s))\n# print(int(s))\n# d=int(s)\n# y=str(d)\n# print(y)\n# print(type(y))\n# movie = fmovice.Search_Movice(\"大上海\")\n# print(movie)\nlist=['限时抢购','手机商城','服装','团购']\nl=random.choice(list)\nprint(l)\n","sub_path":"2017_10_27—测试/Dome/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"626142337","text":"import tweepy #Library for Twitter\r\nimport json,os,datetime\r\nimport emoji\r\n\r\n#loading credentials for the Twitter API\r\nwith open('auth.txt') as f: \r\n data = f.read()\r\nauthDict = json.loads(data)\r\nauth = tweepy.OAuthHandler(authDict['consumer_key'],authDict['consumer_secret'])\r\napi = tweepy.API(auth)\r\n\r\nclass EmojiStreamListener(tweepy.StreamListener):\r\n \"\"\"\r\n Implementation of the Abstract StreamListener object provided in the library. This library is not thread safe.\r\n \"\"\"\r\n def __init__(self):\r\n self.cache_length = 5000\r\n self.cache=[]\r\n if not os.path.exists(os.getcwd()+\"\\\\Tweets\\\\\"):\r\n os.makedirs(os.getcwd()+\"\\\\Tweets\\\\\")\r\n def on_error(self, status_code):\r\n print(status_code)\r\n def on_status(self, status):\r\n \"\"\"\r\n param: status\r\n takes a status, adds it to the internal cache and flushes the cache to disk if it is large enough\r\n No need to check emojis as it is only streaming statuses with Emojis.\r\n \"\"\"\r\n self.cache.append(status.text)\r\n if len(self.cache)>self.cache_length:\r\n with open(os.getcwd()+\"\\\\Tweets\\\\{0}.json\".format(datetime.datetime.now().strftime(\"%b %d %y \"\r\n \"%H-%M-%S%Z\")),\"a+\") as jsonf:\r\n json.dump(self.cache,jsonf)\r\n\r\nemojiStreamListener = EmojiStreamListener()\r\nemojiStream = tweepy.Stream(auth=api.auth,listener=emojiStreamListener)\r\nemojiList = [e for e in emoji.UNICODE_EMOJI['en'].keys()] #Getting a list of all emojis in the Emoji hashmap\r\nemojiStream.filter(track=emojiList) #Makes sure the only statuses we get have emojis in them","sub_path":"TwitterIngest.py","file_name":"TwitterIngest.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"42655308","text":"import datetime\n\ntoday = datetime.date.today()\n\ncurrent_year = datetime.date.today().year\n\nyear = int(input('Year: '))\nmonth = int(input(\"Month: \"))\nday = int(input(\"Day: \"))\nname = str(input(\"Name: \"))\n\nbirthdate = current_year - year\n\nbirthday = datetime.date(year, month, day)\n\ndays_since_birth = (today - birthday).days\n\nprint(name, \"has have lived for\", days_since_birth, \"days on earth and is currently\", birthdate, \"Years old.\")","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"534667812","text":"import tkinter as tk #GUI library\nfrom tkinter import filedialog, Text, StringVar #GUI options\nimport tkinter.ttk as ttk #GUI table module\nfrom tkinter.ttk import Treeview #import table module\nimport os #OS access to open file\nimport interface\nimport sqlite3\n\n\ndef rfidkeytext():\n global card_key\n card_key.set(interface.rfidkey_get())\n return card_key\n\n#get item from textbox where 1.0 means line 1 char 0 and end -1c means read to end of doccument -the last charachter \\n or\\r\ndef NameSave(argz):\n FullName.set(argz.get('1.0', 'end -1c'))\n return FullName.get()\n \ndef addToDb(name,rfid_id):\n name = name.upper()\n try:\n db_conn = sqlite3.connect('test.db')\n db_currsor = db_conn.cursor()\n print('SQL CONNECTION PASSED!')\n except sqlite3.Error as error:\n print(f'SQL CONNECTION FAILED! ERROR : {error}')\n Check = db_conn.execute(\n \"SELECT id FROM rfid_acess WHERE b_key IS ? OR full_name IS ?;\", (rfid_id, name))\n Check = Check.fetchone()\n if Check == None:\n db_currsor.execute(f\"INSERT INTO rfid_acess (b_key, full_name) VALUES (?,?);\", (rfid_id, name))\n db_conn.commit()\n Add_Status.set('Sucess')\n else:\n Add_Status.set(f'Name or ID exist in database with id = {Check[0]}')\n if db_conn:\n db_conn.close()\n \n\ndef rmFromDb(name, rfid_id):\n name = name.upper()\n try:\n db_conn = sqlite3.connect('test.db')\n db_currsor = db_conn.cursor()\n print('SQL CONNECTION PASSED!')\n except sqlite3.Error as error:\n print(f'SQL CONNECTION FAILED! ERROR : {error}')\n Check = db_conn.execute(\n \"SELECT id FROM rfid_acess WHERE b_key IS ? OR full_name IS ?;\" , (rfid_id, name))\n Check = Check.fetchone()\n if Check != None:\n db_currsor.execute(\n f\"DELETE FROM rfid_acess WHERE b_key IS ? OR full_name IS ?;\" , (rfid_id, name))\n db_conn.commit()\n Add_Status.set('Sucess')\n else:\n Add_Status.set('Name or ID does not exist in database')\n if db_conn:\n db_conn.close()\n\ndef list_acess():\n l_acess_window = tk.Tk()\n l_acess_window.title('RFID Acess Table')\n l_acess_window.geometry('400x800')\n try:\n db_conn = sqlite3.connect('test.db')\n print('SQL CONNECTION PASSED!')\n except sqlite3.Error as error:\n print(f'SQL CONNECTION FAILED! ERROR : {error}')\n acess = db_conn.execute('SELECT * FROM rfid_acess;')\n acess = acess.fetchall()\n list_table = ttk.Treeview(l_acess_window)\n list_table['columns'] = ('RFID Key', 'Name')\n \n list_table.column('#0', width = 50, minwidth = 25)\n list_table.column('RFID Key', width = 200, minwidth = 175)\n list_table.column('Name', width=200, minwidth=175)\n\n list_table.heading('#0', text='ID')\n list_table.heading('RFID Key', text = 'RFID Key')\n list_table.heading('Name', text='Name')\n \n for index, row in enumerate(acess):\n list_table.insert('',index,text =row[0], values = row[1:])\n list_table.pack(fill = 'both', expand = True)\n\ndef list_history():\n l_history_window = tk.Tk()\n l_history_window.title('RFID Acess Table')\n l_history_window.geometry('800x600')\n try:\n db_conn = sqlite3.connect('test.db')\n print('SQL CONNECTION PASSED!')\n except sqlite3.Error as error:\n print(f'SQL CONNECTION FAILED! 
ERROR : {error}')\n acess = db_conn.execute('SELECT * FROM rfid_history;')\n acess = acess.fetchall()\n list_table = ttk.Treeview(l_history_window)\n list_table['columns'] = ('RFID Key', 'Name')\n \n list_table.column('#0', width = 200, minwidth = 175)\n list_table.column('RFID Key', width = 200, minwidth = 175)\n list_table.column('Name', width=200, minwidth=175)\n\n list_table.heading('#0', text='Date and Time')\n list_table.heading('RFID Key', text = 'RFID Key')\n list_table.heading('Name', text='Name')\n \n for index, row in enumerate(acess):\n list_table.insert('',index,text =row[0], values = row[1:])\n list_table.pack(fill = 'both', expand = True)\n\n\n#file structue\nroot = tk.Tk()\nroot.title('RFID Manager')\n\nroot['background'] = '#83C3C8'\n#updatable stringvar variabler\ncard_key = StringVar() \nFullName = StringVar()\nAdd_Status = StringVar()\n\n#prevent app resizability \nroot.resizable(height = False, width = False)\n\n#canvas options and commit canvas\ncanvas = tk.Canvas(root, height=240, width=600, bg='#83C3C8') \ncanvas.pack() \n\n#palce and configure frame\nframe = tk.Frame(root, bg ='white') #creare frame\nframe.place(relwidth = 0.8, relheight = 0.8 , relx = 0.1, rely = 0.1) \n\nTitle_label = tk.Label(frame, text='RFID Manager', font=200, bg='white')\nTitle_label.grid(row = 0, column = 1, sticky = 'E')\n\n\n# insert a table at coordiates x,y\nName_label = tk.Label(frame, text='Full Name : ', font=50, bg = 'white')\nName_label.grid(row = 1, column = 0 )\n\n#Adds text editor\nedit_name = tk.Text(frame, height = 1, width = 20, relief = 'solid')\nedit_name.grid(row=1, column=1) \n\nkey_label = tk.Label(frame, text='RFID_key : ', font=50, bg='white')\nkey_label.grid(row=2, column=0)\n\nkey_label = tk.Label(frame, textvariable=card_key, font=20, bg='white')\nkey_label.grid(row=2, column=1)\n\nrfid_key_button = tk.Button(frame, text = 'Scan Card', padx = 10, pady = 5, bg = 'White', command = rfidkeytext)\nrfid_key_button.grid(row = 2, column = 2)\n \n#Adds Button and packs it to the master(root)\nAddUserButton = tk.Button(frame, text='Add User', padx=10, pady=5,\n bg='White', command=lambda: addToDb(NameSave(edit_name), card_key.get()))\nAddUserButton.grid(row = 4, column = 2, sticky='W')\nDeleteUserButton = tk.Button(frame, text='Delete User', padx=10, pady=5,\n bg='White', command=lambda: rmFromDb(NameSave(edit_name), card_key.get()))\nDeleteUserButton.grid(row=4, column=3, sticky = 'E' )\n\nStatus_adduser_txt = tk.Label(frame, text = 'Status: ', font=50, bg='white')\nStatus_adduser = tk.Label(\n frame, textvariable=Add_Status, font=50, bg='white', wraplength=200)\nStatus_adduser_txt.grid(row=3, column=0 )\nStatus_adduser.grid(row=3, column=1)\n\nList_RFID_Acess = tk.Button(frame, text='Show Users',\n padx=10, pady=5, bg='White', command= list_acess)\nList_RFID_Acess.grid(row = 5, column = 0, sticky = 'E')\n\nList_RFID_History = tk.Button(frame, text='Show History',\n padx=10, pady=5, bg='White', command=list_history)\nList_RFID_History.grid(row=5, column=1)\n#runs software\nroot.mainloop()\n\n\n","sub_path":"RFID_manager.py","file_name":"RFID_manager.py","file_ext":"py","file_size_in_byte":6494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
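The queries above presuppose an existing test.db; a table shaped like the following would satisfy every statement in the script (reconstructed from the SQL, not taken from the source):

    CREATE TABLE IF NOT EXISTS rfid_acess (
        id        INTEGER PRIMARY KEY AUTOINCREMENT,
        b_key     TEXT,
        full_name TEXT
    );

list_history() additionally reads an rfid_history table whose first column is a date-and-time value, followed by the RFID key and name.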
+{"seq_id":"257602934","text":"from ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nimport ryu.ofproto.ofproto_v1_3_parser as parser\nimport ryu.ofproto.ofproto_v1_3 as ofproto\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ether_types\nfrom ryu.lib.packet import ethernet, arp, ipv4, ipv6, tcp\n\nfrom cockpit import CockpitApp\nfrom netaddr import IPAddress, IPNetwork\n\n#tm task=security1\n\nETHERTYPES = {2048: \"IPv4\", 2054: \"ARP\", 34525: \"IPv6\"}\nL4PROTO = {1: \"ICMP\", 4: \"IP-in-IP\", 6: \"TCP\", 17: \"UDP\"}\n\nclass SecureGateway(CockpitApp):\n ## Initialize SDN-App\n def __init__(self, *args, **kwargs):\n super(SecureGateway, self).__init__(*args, **kwargs)\n self.pkt_count = {}\n\n ## You already know this function from the lab\n def debug_output(self, dp, pkt, in_port):\n eth = pkt.get_protocol(ethernet.ethernet)\n\n self.pkt_count[dp.id] += 1\n\n print(\"/// [Switch {}]: PACKET-IN (#{}) on port: {}\".format(dp.id, self.pkt_count[dp.id], in_port))\n# print(\" SRC: {}, DST: {} --> {}\".format(eth.src, eth.dst, ETHERTYPES[eth.ethertype]))\n\n# ## Info: IP Packet\n# if eth.ethertype == ether_types.ETH_TYPE_IP:\n# ip_pkt = pkt.get_protocol(ipv4.ipv4)\n# print(\" {:17}, {:17} --> {}\".format(ip_pkt.src, ip_pkt.dst, L4PROTO[ip_pkt.proto]))\n#\n# ## Info: TCP Packet\n## if ip_pkt.proto == 6:\n## tcp_pkt = pkt.get_protocol(tcp.tcp)\n## print(\" SRC-PORT: {}, DST-PORT: {}, SEQ: {}, ACK: {}\".format(tcp_pkt.src_port, tcp_pkt.dst_port, tcp_pkt.seq, tcp_pkt.ack))\n\n# if eth.ethertype == ether_types.ETH_TYPE_ARP:\n# arp_pkt = pkt.get_protocol(arp.arp)\n# print(\" [ARP] SRC-MAC: {}, SRC-IP: {}; DST-MAC: {} DST-IP: {}\".format(arp_pkt.src_mac, arp_pkt.src_ip, arp_pkt.dst_mac, arp_pkt.dst_ip))\n\n ## When a new switch connects to the controller\n @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\n def switch_features_handler(self, ev):\n dp = ev.msg.datapath\n\n self.pkt_count[dp.id] = 0\n\n ## some debug output\n print(\"\")\n print(\"\")\n print(\"/// Switch connected. 
ID: {}\".format(dp.id))\n\n ## default \"all to controller\" flow\n match = parser.OFPMatch()\n action = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]\n self.program_flow(dp, match, action, priority=0, idle_timeout=0, hard_timeout=0)\n\n # flood ARP (this is just to get ping to work)\n match = parser.OFPMatch(\n eth_type = ether_types.ETH_TYPE_ARP\n )\n action = [parser.OFPActionOutput(ofproto.OFPP_FLOOD)]\n self.program_flow(dp, match, action, priority=1, idle_timeout=0, hard_timeout=0)\n\n # set up some proactive flows\n self.flow_example(dp)\n #self.flow_example_groups(dp)\n\n ## When a new packet comes in at the controller -- \"PACKET-IN\"\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def packet_in_handler(self, ev):\n # all info is stored in the ev object, extract some relevant fields\n msg = ev.msg\n dp = msg.datapath\n in_port = msg.match[\"in_port\"]\n data = msg.data\n pkt = packet.Packet(data)\n eth = pkt.get_protocol(ethernet.ethernet)\n ip = pkt.get_protocol(ipv4.ipv4)\n\n # ignore LLDP Packets\n if eth.ethertype == ether_types.ETH_TYPE_LLDP:\n return\n\n self.debug_output(dp, pkt, in_port)\n\n def flow_example(self, dp):\n # a flow to allow h1 to l1 traffic, matching ip src and dst \n match = parser.OFPMatch(\n eth_type = ether_types.ETH_TYPE_IP,\n ipv4_src = '11.0.0.1/8',\n ipv4_dst = '22.0.0.1/8'\n )\n # on a match the packet is sent out the correct switch port as configured \n action = [parser.OFPActionOutput(2)]\n self.program_flow(dp, match, action, priority=1, idle_timeout=0, hard_timeout=0)\n \n # a flow to allow l1 to h1 traffic, matching ip src and dst\n match = parser.OFPMatch(\n eth_type = ether_types.ETH_TYPE_IP,\n ipv4_src = '22.0.0.1/8',\n ipv4_dst = '11.0.0.1/8'\n )\n # on a match the packet is sent out the correct switch port as configured\n action = [parser.OFPActionOutput(1)]\n self.program_flow(dp, match, action, priority=1, idle_timeout=0, hard_timeout=0)\n \n # a flow to allow h1 to r1 traffic, matching ip src and dst\n match = parser.OFPMatch(\n eth_type = ether_types.ETH_TYPE_IP,\n ipv4_src = '11.0.0.1/8',\n ipv4_dst = '33.0.0.1/8'\n )\n # on a match the packet is sent out the correct switch port as configured\n action = [parser.OFPActionOutput(3)]\n self.program_flow(dp, match, action, priority=1, idle_timeout=0, hard_timeout=0)\n \n # a flow to allow r1 to h1 traffic, matching ip src and dst\n match = parser.OFPMatch(\n eth_type = ether_types.ETH_TYPE_IP,\n ipv4_src = '33.0.0.1/8',\n ipv4_dst = '11.0.0.1/8'\n )\n # on a match the packet is sent out the correct switch port as configured\n action = [parser.OFPActionOutput(1)]\n self.program_flow(dp, match, action, priority=1, idle_timeout=0, hard_timeout=0)\n # \n # now to create flows to drop specific traffic\n # \n # a flow to drop m1 to h1 traffic\n # match = parser.OFPMatch(\n # eth_type = ether_types.ETH_TYPE_IP,\n # ipv4_src = '44.0.0.1/8',\n # ipv4_dst = '11.0.0.1/8'\n # )\n # An empty action list indicates a drop rule\n # self.program_flow(datapath, match, [], priority = 2)\n # \n # a flow to drop h1 to m1 traffic\n # match = parser.OFPMatch(\n # eth_type = ether_types.ETH_TYPE_IP,\n # ipv4_src = '11.0.0.1/8',\n # ipv4_dst = '44.0.0.1/8'\n # )\n # An empty action list indicates a drop rule\n # self.program_flow(datapath, match, [], priority = 2)\n # \n # a flow to drop m1 to l1 traffic\n # match = parser.OFPMatch(\n # eth_type = ether_types.ETH_TYPE_IP,\n # ipv4_src = '44.0.0.1/8',\n # ipv4_dst = '22.0.0.1/8'\n # )\n # An empty action list indicates a drop rule\n # 
self.program_flow(datapath, match, [], priority = 2)\n # \n # a flow to drop l1 to m1 traffic\n # match = parser.OFPMatch(\n # eth_type = ether_types.ETH_TYPE_IP,\n # ipv4_src = '44.0.0.1/8',\n # ipv4_dst = '22.0.0.1/8'\n # )\n # An empty action list indicates a drop rule\n # self.program_flow(datapath, match, [], priority = 2) \n #\n # a flow to drop m1 to r1 traffic\n # match = parser.OFPMatch(\n # eth_type = ether_types.ETH_TYPE_IP,\n # ipv4_src = '44.0.0.1/8',\n # ipv4_dst = '33.0.0.1/8'\n # )\n # An empty action list indicates a drop rule\n # self.program_flow(datapath, match, [], priority = 2)\n # \n # a flow to drop r1 to m1 traffic\n # match = parser.OFPMatch(\n # eth_type = ether_types.ETH_TYPE_IP,\n # ipv4_src = '44.0.0.1/8',\n # ipv4_dst = '33.0.0.1/8'\n # )\n # An empty action list indicates a drop rule\n # self.program_flow(datapath, match, [], priority = 2) ","sub_path":"Application/configured.py","file_name":"configured.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
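The same match-and-drop pattern extends to L4 fields. For example, a rule that would drop only TCP port 80 traffic from the 44.0.0.0/8 host could look like the following (a sketch using standard OpenFlow 1.3 OXM match fields, not part of the original app):

    match = parser.OFPMatch(
        eth_type = ether_types.ETH_TYPE_IP,
        ip_proto = 6,            # TCP, per the L4PROTO table above
        tcp_dst = 80,
        ipv4_src = '44.0.0.1/8'
    )
    self.program_flow(dp, match, [], priority = 3)  # empty action list = drop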
+{"seq_id":"278069627","text":"from application.models.entity import Category\n\n\ndef category(request):\n \"\"\"\n Show categories in footer\n :param request:\n :return: categories with limit\n \"\"\"\n context_data = dict()\n limit = 5\n context_data['categories'] = Category.objects.filter(status=True)[:limit]\n return context_data\n","sub_path":"application/context/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"10396943","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n # O(n) space O(n) time\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n if root == None:\n return []\n \n level_dict = {root: 0}\n \n queue = deque()\n queue.append(root)\n \n bfs_array = []\n \n while len(queue) != 0:\n current_node = queue.popleft()\n bfs_array.append(current_node)\n # Add children to queue AND Add children to dictionary\n if current_node.left != None:\n queue.append(current_node.left)\n level_dict[current_node.left] = level_dict[current_node] + 1\n \n if current_node.right != None:\n queue.append(current_node.right)\n level_dict[current_node.right] = level_dict[current_node] + 1\n \n final_array = []\n temp_array = []\n current_level = 0\n \n for i in range(len(bfs_array)):\n print(i, bfs_array[i].val)\n level = level_dict[bfs_array[i]]\n \n if level != current_level:\n final_array.append(temp_array)\n temp_array = []\n current_level += 1\n temp_array.append(bfs_array[i].val)\n \n final_array.append(temp_array)\n \n return final_array\n","sub_path":"medium/binary_tree_level_order_top.py","file_name":"binary_tree_level_order_top.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"348426852","text":"'''\nCreated on 16-Nov-2013\n\n@author: Meghana M Reddy\n'''\nfrom bisect import bisect_left\n\nDENSITY_THRESHOLD = 0.4\nSIZE_THRESHOLD = 5\n\ndef is_long_and_sparse(lst, zero_test):\n '''\n Checks if it is worth using a sparse representation for a vector (elements given in the argument 'lst')\n '''\n count = 0\n for i in lst:\n if zero_test(i) :\n count = count + 1\n \n non_zeros = len(lst) - count\n \n if float(non_zeros)/float(len(lst)) < DENSITY_THRESHOLD and len(lst) > SIZE_THRESHOLD:\n return True\n else:\n return False\n \n \n \ndef make_vector(data, zero_test):\n '''\n Make a vector out of the list of data values in 'data'\n Depending on whether this list passes the 'is_long_and_sparse' test, either instantiate the FullVector class\n or the SparseVector class\n '''\n if(is_long_and_sparse(data , zero_test)):\n list_values = []\n list_indices = []\n for idx , i in enumerate(data):\n if not(zero_test(i)) :\n list_values.append(i)\n list_indices.append(idx)\n \n vector = SparseVector(list_values , list_indices , len(data) , zero_test)\n else:\n vector = FullVector(data)\n \n return vector\n \n \n\nclass Vector(object):\n '''\n Base Vector class - implements a number of the common methods required for a Vector\n '''\n def __init__(self , lst , zero_test = lambda x : (x == 0)):\n '''\n Have a data attribute that is initialized to the list of elements given in the argument 'lst'\n zero_test is a function that tests if a given element is zero (remember you could potentially\n have a vector of complex numbers, even a vector of functions, ... not just numbers \n '''\n \n self.data = lst\n self.zero_test = zero_test\n\n\n\n def __len__(self):\n '''\n Returns the length of the vector (this method allows you to use the built-in function len()\n on any object of type Vector.\n '''\n \n return (len(self.data))\n\n\n\n def __getitem__(self, i):\n '''\n Return the i-th element of the vector (allows you to use the indexing operator [] on a Vector object)\n '''\n \n return self.data[i]\n \n \n\n def __setitem__(self, i, val):\n '''\n Set the i-th element of the vector to 'val' using the indexing operator [] on a Vector object\n '''\n \n self.data[i] = val\n \n \n \n def is_zero(self):\n '''\n Check if the vector object is identically zero (all elements are zero)\n '''\n \n for i in self.data:\n if not(self.zero_test(i)):\n return False\n \n return True\n \n\n\n def components(self):\n '''\n Allows one to iterate through the elements of the vector as shown below\n for elem in vector.components(): (vector is an object of type Vector or any of its derived classes)\n '''\n \n for i in xrange(len(self)):\n yield self[i]\n\n\n\n def __eq__(self , vector):\n '''\n Check if this vector is identical to another 'vector' (allows use of operator == to compare vectors)\n '''\n \n for i in range(0 , len(self)):\n if(self[i] != vector[i]):\n return False\n \n return True\n \n \n \n def __mul__(self , vector):\n '''\n Return the inner(dot)-product of this vector with another 'vector' (allows use of * operator between vectors)\n Assumes that the elements of the vectors have a * operator defined between them (if they are not numbers)\n If the lengths of this and 'vector' are not the same, then return None\n '''\n \n scalar_sum = 0\n \n if(len(vector) != len(self)):\n return None\n \n for i in range(0 , len(self)):\n scalar_sum = scalar_sum + vector[i] * self[i]\n \n return scalar_sum\n \n\n\n def __add__(self, vector):\n '''\n Return the sum of this vector with another 'vector' (allows 
use of + operator between vectors)\n Use the make_vector function to instantiate the appropriate subclass of Vector\n Assumes that the elements of the vectors have a + operator defined between them (if they are not numbers)\n If the lengths of this and 'vector' are not the same, then return None\n '''\n sum_list = []\n \n if(len(vector) != len(self)):\n return None\n \n for i in range(0 , len(self)):\n sum_list.append (vector[i] + self[i])\n \n return make_vector(sum_list , self.zero_test)\n \n \n \n def __sub__(self, vector):\n '''\n Return the difference of this vector with another 'vector' (allows use of - operator between vectors)\n Use the make_vector function to instantiate the appropriate subclass of Vector\n Assumes that the elements of the vectors have a - operator defined between them (if they are not numbers)\n If the lengths of this and 'vector' are not the same, then return None\n '''\n \n sub_list = []\n \n if(len(vector) != len(self)):\n return None\n \n for i in range(0 , len(self)):\n sub_list.append (self[i] - vector[i] )\n \n return make_vector(sub_list , self.zero_test)\n\n \n \n def __iadd__(self, vector):\n '''\n Implements the += operator with another 'vector'\n Assumes that the elements of the vectors have a + operator defined between them (if they are not numbers)\n Add corresponding elements upto the min of the two lengths (in case the vectors are of different lengths)\n '''\n for i in range(0 , min(len(self) , len(vector))):\n self[i] = self[i] + vector[i]\n \n return make_vector(self.data , self.zero_test)\n \n \n\n def __isub__(self, vector):\n '''\n Implements the -= operator with another 'vector'\n Assumes that the elements of the vectors have a - operator defined between them (if they are not numbers)\n Subtract corresponding elements upto the min of the two lengths (in case the vectors are of different lengths)\n '''\n for i in range(0 , min(len(self) , len(vector))):\n self[i] = self[i] - vector[i]\n\n return make_vector(self.data , self.zero_test)\n \n \n \n def split(self):\n '''\n Split the vector into two halves - left and right\n Return two vectors separately\n '''\n length = len(self)\n mid = length/2\n first_half = []\n second_half = []\n \n for i in (0 , mid):\n first_half.append(self[i])\n \n for i in (mid , length):\n second_half.append(self[i])\n \n make_vector(first_half , self.zero_test)\n make_vector(second_half , self.zero_test)\n \n return first_half , second_half\n \n \n \nclass FullVector(Vector):\n '''\n A subclass of Vector where all elements are kept explicitly as a list\n '''\n def __init__(self , lst , zero_test = lambda x : (x == 0)):\n '''\n Constructor for a FullVector on data given in the 'lst' argument - 'lst' is the list of elements in the vector\n Uses the base (parent) class attributes data and zero_test\n '''\n super(FullVector, self).__init__(lst, zero_test)\n self.data = lst\n self.zero_test = zero_test\n\n def split(self):\n '''\n Split the vector into two halves - left and right\n Return two (full) vectors separately\n This overrides the default implementation of this method in the Vector Class\n '''\n \n length = len(self)\n mid = length/2\n first_half = []\n second_half = []\n \n for i in range(0,mid):\n first_half.append(self[i])\n for i in range(mid,length):\n second_half.append(self[i])\n \n first_vect = make_vector(first_half , self.zero_test)\n second_vect = make_vector(second_half , self.zero_test)\n \n return first_vect , second_vect\n \n \n \n def merge(self, vector):\n '''\n Merge this vector with 'vector' - 
append the elements together (this followed by 'vector')\n '''\n \n for i in vector:\n self.data.append(i)\n \n\n\nclass SparseVector(Vector):\n '''\n Vector that has very few non-zero entries\n Values and corresponding indices are kept in separate lists\n '''\n def __init__(self, values, indices, length = 0, zero_test = lambda x : (x == 0)):\n '''\n 'values' argument is the list of non-zero values and the corresponding indices are in the list 'indices'\n Uses the base (parent) class attributes data (this is where 'values' are kept) and zero_test\n Length is the length of the vector - the number of entries in 'values' is just the number of non-zero entries\n You can assume that the number of entries in values and indices is the same.\n '''\n \n super(SparseVector, self).__init__(values, zero_test)\n self.data = values\n self.indices = indices\n self.length = length\n self.zero_test = zero_test\n \n\n\n def __len__(self):\n '''\n Overriding the default __len__ method with behavior specific to sparse vectors\n '''\n \n return self.length\n\n\n\n def __getitem__(self, i):\n '''\n Overriding the default __getitem__ method with behavior specific to sparse vectors\n '''\n \n if i in self.indices:\n idx = bisect_left(self.indices,i)\n return self.data[idx]\n \n return 0\n \n \n \n def __setitem__(self, i, val):\n '''\n Overriding the default __setitem__ method with behavior specific to sparse vectors\n Locate the index i and if it is not already there insert appropriate values into data and indices\n If the index i is there then update the corresponding value to 'val'\n '''\n \n \n idx = bisect_left(self.indices,i)\n \n if i in self.indices:\n self.data[idx] = val\n \n elif(idx>self.indices[-1]):\n self.data.append(val)\n self.indices.append(i)\n \n else:\n self.data.insert(idx,val)\n self.indices.insert(idx,i)\n\n\n\n def is_zero(self):\n '''\n Overriding the default is_zero method specific to sparse vectors\n '''\n \n \n if self.data == []:\n return True\n \n return False\n\n\n\n def split(self):\n '''\n Split the vector into two halves - left and right\n Return two (sparse) vectors separately\n This overrides the default implementation of this method in the Vector Class\n '''\n \n \n sparse_list = [0] * self.length\n j = 0\n \n for i in self.indices:\n sparse_list[i] = self.data[j]\n j = j+1\n \n length = len(sparse_list)\n mid = length/2\n \n first_half = sparse_list[:mid]\n second_half = sparse_list[mid:]\n \n first_vector = make_vector(first_half , self.zero_test)\n second_vector = make_vector(second_half , self.zero_test)\n \n return first_vector , second_vector\n \n\n def merge(self, vector):\n '''\n Merge this vector with 'vector' - append the elements together (this followed by 'vector')\n '''\n \n count = len(self)\n for i in range(0 , len(vector)):\n if vector[i] != 0:\n self.data.append(vector[i])\n self.indices.append(count)\n count = count + 1\n \n","sub_path":"vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":11988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
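A quick check of the density dispatch in make_vector, using the thresholds defined at the top of the file:

    dense = make_vector([1, 2, 3, 4, 5, 6], lambda x: x == 0)           # density 1.0 -> FullVector
    sparse = make_vector([0, 0, 7, 0, 0, 0, 0, 9], lambda x: x == 0)    # density 0.25, len > 5 -> SparseVector
    assert isinstance(sparse, SparseVector)
    assert sparse[2] == 7 and sparse[3] == 0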
+{"seq_id":"235246321","text":"import numpy as np\nfrom sklearn import preprocessing, model_selection, neighbors\nimport pandas as pd\n\ndf = pd.read_csv('breast-cancer-wisconsin.data')\ndf.replace('?', -99999, inplace=True)\n#any data with ? replaces the outliners\ndf.drop(['id'], 1, inplace=True)\n#id is not important so we are dropping it. If we dont drop the id, the accuracy goes from 96% to 50%\n\nX = np.array(df.drop(['class'],1))\n# x for features\ny = np.array(df['class'])\n# y for labels / class\n\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.2)\n\nclf = neighbors.KNeighborsClassifier()\nclf.fit(X_train,y_train)\n\naccuracy = clf.score(X_test, y_test)\nprint(accuracy)\n\nexample_measures = np.array([[4,2,1,1,1,2,3,2]])\nexample_measures = example_measures.reshape(len(example_measures),-1)\nprediction = clf.predict(example_measures)\nprint(prediction)\n","sub_path":"KNN/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"406228744","text":"from matplotlib.pyplot import *\n\nimport lktrack\n\n\nimnames1 = ['viff.000.ppm', 'viff.001.ppm',\n 'viff.002.ppm', 'viff.003.ppm', 'viff.004.ppm']\n# imnames2 = ['viff.004.ppm', 'viff.002.ppm',\n# 'viff.002.ppm', 'viff.001.ppm', 'viff.000.ppm']\n\nlkt1 = lktrack.LKTracker(imnames1)\nfor im1, ft1 in lkt1.track():\n print('tracking %d clockwise features' % len(ft1))\n\n# lkt2 = lktrack.LKTracker(imnames2)\n# for im2, ft2 in lkt2.track():\n# print('tracking %d anticlockwise features' % len(ft2))\n\n\n# 画出轨迹\nfigure(0)\n# # subplot(1, 2, 1)\nimshow(im1)\nfor p in ft1:\n plot(p[0], p[1], 'bo')\nfor t in lkt1.tracks:\n plot([p[0] for p in t], [p[1] for p in t])\naxis('off')\n\n# subplot(1, 2, 2)\n# imshow(im2)\n# for p in ft2:\n# plot(p[0], p[1], 'bo')\n# for t in lkt2.tracks:\n# plot([p[0] for p in t], [p[1] for p in t])\n# axis('off')\nshow()\n\n\n# lkt1.draw()\n# show()\n","sub_path":"ACV/dinosaur.py","file_name":"dinosaur.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"325095735","text":"import pygame\nimport math\nfrom random import randint\nfrom win32api import GetSystemMetrics #pip install pywin32\nimport os\n#display setup\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (0,30)\npygame.init()\nsize = [GetSystemMetrics(0),GetSystemMetrics(1) - 30]\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption('Spirograph')\n\ndef increment_color(color, colorstep): #color incremenring algorithm\n if colorstep == 0:\n return color\n elif color[2] == 0:\n if color[0] - colorstep < 0:\n return (0, 255 - colorstep + color[0],colorstep - color[0])\n else:\n return(color[0] - colorstep, color[1] + colorstep, 0)\n elif color[0] == 0:\n if color[1] - colorstep < 0:\n return (colorstep - color[1], 0, 255 - colorstep + color[1])\n else:\n return(0, color[1] - colorstep, color[2] + colorstep)\n else : #color[1] == 0\n if color[2] - colorstep < 0:\n return (255 - colorstep + color[2], colorstep - color[2], 0)\n else:\n return(color[0] + colorstep, 0, color[2] - colorstep)\ndef random_color():#gets random color with correct amout of rbg values\n random_indecies = [0,0]\n rgb = [0,0,0]\n\n random_indecies[0] = randint(0,2);\n random_indecies[1] = (random_indecies[0] + (randint(0,1) * 2 - 1)) % 3\n\n for i in range(255):\n rgb[random_indecies[randint(0,1)]] += 1\n return (rgb[0],rgb[1],rgb[2])\n\ndef draw_shapes(shapes = 1, sides = 3, radius = 1, direction = 0, current_point = [0,0], color = (0,0,0), colorsteps = 1, colorstep = 0, concurrency = True):\n start_color = color\n if color[0] + color[1] + color[2] > 255: # makes sure your colors are the right values\n colorsteps = 1\n colorstep = 0\n print(\"Your color value excedes a sum of 255, single color mode engaged\")\n for shape in range(shapes): #make shapes\n if start_color[0] == 0 and start_color[1] == 0 and start_color[2] == 0:\n color = random_color()\n #old equation\n #side_length = radius * math.sin(math.pi/sides) #see relationship equation on desmos\n side_length = (2 ** .5) * radius * math.sin(math.pi/sides) * (1-math.cos(math.pi* ( (sides - sides % 2) /sides) )) ** -.5\n if not concurrency:\n color = start_color\n for side in range(sides): #draw a side\n if exit():\n return\n for step in range(colorsteps): #draw side with certain number of color changes\n if exit():\n return\n next_point = [current_point[0] + side_length/colorsteps * math.cos(direction),\n current_point[1] + side_length/colorsteps * math.sin(direction)]\n pygame.draw.line(screen, color, current_point, next_point, 1)\n current_point = next_point\n pygame.display.flip()\n color = increment_color(color,colorstep = colorstep)\n direction += 2 * math.pi / sides\n direction += 2 * math.pi / shapes\n while True:#wait after shape is drawn\n if exit():\n return\ndef exit():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n return False\ndef main(): #comment and add shape functions to to draw them\n #draw_shapes(radius = 400,shapes = 96, sides = 32, current_point = [size[0]/2, size[1]/2], colorsteps = 8, colorstep = 2)\n draw_shapes(radius = size[0] / 2 ** .5,shapes = 512, sides = 3, current_point = [size[0]/2, size[1]/2], colorsteps = 2, colorstep = 2, concurrency = True)\n #draw_shapes(radius = size[0] / 2 ** .5,shapes = 192, sides = 32, current_point = [size[0]/2, size[1]/2], color = (120,125,0), colorsteps = 24, colorstep = 24, concurrency = False)\n #draw_shapes(radius = size[0] / 2 ** .5,shapes = 512, sides = 5, current_point = [size[0]/2, size[1]/2], color = (0,255,0), colorsteps = 24, 
colorstep = 48, concurrency = True)\n #draw_shapes(radius = size[0] / 2 ** .5,shapes = 512, sides = 4, current_point = [size[0]/2, size[1]/2], color = (0,255,0), colorsteps = 24, colorstep = 48, concurrency = True)\n #draw_shapes(radius = 300,shapes = 100, sides = 5, current_point = [size[0]/2, size[1]/2], color = (255,255,255), colorsteps = 24, colorstep = 48, concurrency = True)\n\nmain()\n","sub_path":"spirograph.py","file_name":"spirograph.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"211983585","text":"import csv\nimport os\nimport argparse\nimport logging\nimport tempfile\nfrom utils import yes_or_no\nfrom utils import initialize_logger\nfrom utils import get_remote_sha_sum\nfrom utils import check_header\n\n\n\ndef get_args():\n example_text = '''\n examples:\n\n python opendiffit/%(add_hash)s --input-file=\"report.csv\" --output-file=\"report_hashed.csv\"\n\n ''' % {'add_hash': os.path.basename(__file__)}\n\n parser = argparse.ArgumentParser(epilog=example_text, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-i', '--input-file', help='original csv')\n parser.add_argument('-o', '--output-file', help='hashed version of csv. Use \"-\" overwrite the current file (keep a backup).')\n return parser.parse_args()\n\n\ndef add_hash(input_file,output_file):\n \"\"\" Add new column with hash \"\"\"\n with open(input_file, 'r', encoding='utf-8-sig') as r_csvfile, \\\n open(output_file, 'w', encoding='utf-8-sig') as w_csvfile:\n reader = csv.DictReader(r_csvfile)\n fieldnames = reader.fieldnames + ['hash','comply']\n writer = csv.DictWriter(w_csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n for row in reader:\n try:\n row['hash'] = get_remote_sha_sum(row['url'])\n writer.writerow(row)\n logging.info(\"Hashing...\")\n except Exception as ex:\n logging.error(ex)\n logging.info(\"Hashing complete.\")\n\ndef main():\n \"\"\" Pass arguments, check csv validity, and add hash \"\"\"\n args = get_args()\n input_file = args.input_file\n output_file = args.output_file\n output_dir = os.path.dirname(args.input_file)\n initialize_logger('add_hash', output_dir)\n if output_file == \"-\":\n # yes_or_no(\"Are you sure you want to add hashes to the '%s' file? (keeping a backup is recommended)\" % (input_file))\n output_file = tempfile.gettempdir() + 'tmp.csv'\n try:\n if check_header(input_file,['url'],['hash']):\n add_hash(input_file,output_file)\n os.remove(input_file)\n os.rename(output_file, input_file)\n\n except Exception as ex:\n logging.error(ex)\n\nif __name__ == '__main__':\n main()","sub_path":"opendiffit/add_hash.py","file_name":"add_hash.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"185246067","text":"import numpy as np\r\n\r\nN = 4\r\nM = 6\r\n\r\nA = np.random.randint(-10, 10, (N, M))\r\nprint(\"Матрица:\" + str(A))\r\n\r\nsum = A.sum(axis=0)\r\n\r\ni = sum.argmin(axis=0)\r\n\r\nmin = A.min(axis=0)\r\n\r\nmin = min[i]\r\n\r\nprint(\"Минимальный элемент: \" + str(min))","sub_path":"Копытов/2/задача 3.py","file_name":"задача 3.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"413088394","text":"from pyfiglet import Figlet\nfrom termcolor import colored\n\nmessage = input(\"What message do you want to print?\")\ncolor = input(\"What color?\")\n\nvalid_colors= (\"red\", \"green\", \"yellow\")\n\nif (color not in valid_colors):\n color=\"red\"\n\nf = Figlet(font='slant')\nprint(colored(f.renderText(message), color=color))\n","sub_path":"Modules/ASCII_ART.py","file_name":"ASCII_ART.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"104399397","text":"import pyodbc\nimport pandas as pd\nimport os\nfrom connection.connect_string import *\n\n\ndef main():\n po_number = input('Inscrire numéro de PO:')\n current_folder = os.path.dirname(os.path.abspath(__file__))\n save_path = \"%s\\\\T%s.xlsx\" % (current_folder, po_number)\n cnxx = pyodbc.connect(connect_string())\n df = get_project_no(cnxx, po_number)\n #print(df)\n save_to_excel(df, save_path)\n cnxx.close()\n\n\ndef get_project_no(cnxx, po_number):\n sql = (\n \"SELECT C.DOC_NAME, C.PROJECT_NO, sum(A.qty*B.unit_price)\"\n \"FROM [ETI].[dbo].[P_ORDER_SUBDTL] A \"\n \"inner join p_order_dtl B on B.ITEM_NO = A.ITEM_NO \"\n \"inner join projet C on C.PROJECT_NO = LEFT(A.job_no,4) \"\n \"where A.po={} and B.po={} and b.SELECTED_ITEM=1\"\n \"group by C.DOC_NAME, C.PROJECT_NO\"\n .format(po_number, po_number)\n )\n df = pd.read_sql(sql, cnxx)\n return df\n\n\ndef save_to_excel(df, save_path):\n writer = pd.ExcelWriter(save_path)\n df.to_excel(writer, sheet_name='sheet11', index=False)\n writer.save()\n\n\nif __name__ == '__main__':\n # execute only if run as the entry point into the program\n main()\n","sub_path":"findprojectnumber/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"237713871","text":"#!/usr/bin/env python3\n\nimport spacy\nimport random, re\nimport argparse\nimport sys\n\n# Helper functions\n\ndef get_wh(token, span): \n entities = span.ents\n wh_word = \"What\"\n for ent in entities:\n if ent.text.lower() == token.text.lower() and ent.label_ in [\"GPE\", \"ORG\", \"PRODUCT\"]:\n wh_word = \"What\"\n return wh_word\n elif ent.text.lower() == token.text.lower() and ent.label_ in [\"LOC\", \"GPE\"]:\n wh_word = \"Where\"\n return wh_word\n elif ent.text.lower() == token.text.lower() and ent.label_ == \"PERSON\":\n wh_word = \"Who\"\n return wh_word\n return wh_word\n\ndef get_noun_phrase(token):\n noun_phrase = \"\"\n for sub in token.subtree:\n if sub.pos_ != \"CCONJ\":\n noun_phrase += (sub.text + \" \")\n # Proper spacing\n return noun_phrase[:-1]\n\ndef generate_questions(doc):\n question_list = []\n for chunk in doc.noun_chunks:\n # The head of syntactic parent\n head = chunk.root.head\n #print(chunk.text, '(%s)' %chunk.root.text, 'head: %s' %head.text)\n dep_children = [child for child in chunk.root.children]\n noun_phrase = chunk.text\n verb_phrase = \"\"\n subject_phrase = \"\"\n prep_phrase = \"\"\n entity = \"\"\n # Decapitalize non entity\n if len(chunk.ents) > 0:\n entity = chunk.ents[0].label_\n else:\n noun_phrase = noun_phrase.lower()\n if head.pos_ == \"VERB\" and chunk.root.dep_ in [\"nsubj\"]:\n verb_phrase = head.text\n for verb_child in head.children:\n if verb_child.dep_ in [\"prep\", \"agent\"]:\n verb_phrase += \" \" + verb_child.text\n if verb_child.dep_ == \"dobj\":\n subject_phrase = verb_child.text\n #print(noun_phrase, verb_phrase, subject_phrase)\n\n # Passive\n if head.pos_ == \"VERB\" and chunk.root.dep_ in [\"nsubjpass\"]:\n verb_phrase = head.text\n for verb_child in head.children:\n if verb_child.dep_ in [\"auxpass\"]:\n # Ask about the object\n #print(\"Who\", head.text, get_noun_phrase(chunk.root), \"?\")\n question = \"Who \" + head.text + \" \" + get_noun_phrase(chunk.root)+\"?\"\n if question not in question_list:\n question_list.append(question)\n\n # What-is (Attribute) question\n if head.pos_ == \"AUX\":\n #print(\"(Attribute)\", get_wh(head, sample), head.text, noun_phrase)\n question = get_wh(head, doc) + \" \" + head.text + \" \" + noun_phrase +\"?\"\n if question not in question_list:\n question_list.append(question)\n\n\n # Question about chunk from clause\n for child in dep_children:\n # Noun preposition\n if child.dep_ == \"prep\":\n noun_prep = doc[child.i: child.right_edge.i+1].text\n noun_phrase += (\" \" + noun_prep)\n if child.pos_ == \"VERB\" and child.dep_ in [\"acl\", \"nsubjpass\"]:\n verb_phrase = child.text\n for verb_prep in child.children:\n if verb_prep.dep_ in [\"prep\", \"agent\"]:\n verb_phrase += \" \" + verb_prep.text\n prep = verb_phrase\n #print(\"(from clause)\", get_wh(chunk, sample), \"is\", noun_phrase, verb_phrase + \"?\") \n question = get_wh(chunk, doc) + \" \" + \"is\" + \" \" + noun_phrase + \" \" + verb_phrase + \"?\"\n if question not in question_list:\n question_list.append(question)\n\n\n # NP + VP + Subject question: What did NP + VP?\n if chunk.root.dep_ in [\"nsubj\"] and head.pos_ in [\"VERB\"]:\n #print(\"(NP + VP)\", get_wh(head, sample), \"did\", noun_phrase, head.text)\n question = get_wh(head, doc) + \" \" + \"did\" + \" \" + noun_phrase + \" \" + head.lemma_ + \"?\"\n if question not in question_list:\n question_list.append(question)\n return question_list\n\n# Select questions regarding entities\ndef contain_entity(question, doc): \n entities = 
doc.ents\n for entity in entities:\n if re.search(entity.text.lower(), question) is not None:\n return True\n return False\n\ndef ask(text, nquestions):\n doc = nlp(text)\n question_list = generate_questions(doc)\n output = []\n # Question selection\n for question in question_list:\n\n output.append(question)\n while (nquestions > 0):\n random_idx = random.randint(0, len(question_list)-1)\n print(question_list[random_idx])\n nquestions -= 1\n \n \n \ndef parse_args():\n parser = argparse.ArgumentParser('Question Generator')\n parser.add_argument('text_file', help='text_file to read')\n parser.add_argument('num', help='number of questions to generate', type=int)\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n #path = './Development_data/set1/a1.txt'\n args = parse_args()\n text_file = args.text_file\n num = args.num\n with open(text_file, 'r', encoding='utf8') as f:\n text = f.read()\n # load 'en_core_web_sm' if you dont have it installed\n nlp = spacy.load('en_core_web_lg')\n #nlp = spacy.load('en_core_web_sm')\n doc = nlp(text)\n ask(text, num)","sub_path":"ask.py","file_name":"ask.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
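Run from a shell against one of the data set's articles, e.g.:

    python ask.py Development_data/set1/a1.txt 5

which prints up to five questions generated from the article (fewer if the generator produced fewer candidates).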
+{"seq_id":"62441168","text":"#https://codecombat.com/play/level/golden-choice\n\n################################################\n'''\nI Tried to finish this level using recursion, but level to slow, when i Use it.\n\nSo I will finish level with most stupid way.\n\nThis is the example of recursion function(unfinished)\n\ndef CikleInCicle(y, k, summ):\n if y<9:\n summ = summ + goldMap[y][k]\n if k>0:\n summ1 = CikleInCicle(y+1, k - 1, summ)\n else:\n summ1 = 0\n if k<18:\n summ2 = CikleInCicle(y+1, k+1, summ)\n else:\n summ2 = 0\n #hero.say(y+'(y)'+k+'(k)'+summ1 +'x' +summ2)\n if summ1>summ2:\n return summ1 + summ\n else:\n return summ2 + summ\n else:\n #hero.say('top:' + goldMap[y][k])\n return goldMap[y][k]\ngoldMap = makeGoldMap(hero.findItems())\nfor kstart in range(0, 19, 2):\n summ = CikleInCicle(0, kstart, 0)\n #hero.say(kstart +'x' +summ)\n\nHere I need to return the route with summ also, but i will newer do that, beacuse of message:\n\n\"Code never finished. It's either really slow or has an infinite loop.\"\n'''\n###############################################\nkstart = []\nkmax = []\nsummMax = 0\nfor kstart[0] in range(0, 19, 2): #sorry for that\n for kstart[1] in range(1, 19, 2):#i am really sorry\n if Math.abs(kstart[0] - kstart[1]) == 1:\n for kstart[2] in range(0, 19, 2):#but i want to pass this level\n if Math.abs(kstart[1] - kstart[2]) == 1:\n for kstart[3] in range(1, 19, 2):#the best code to slow\n if Math.abs(kstart[2] - kstart[3]) == 1:\n for kstart[4] in range(0, 19, 2):#i never do this again\n if Math.abs(kstart[3] - kstart[4]) == 1:\n for kstart[5] in range(1, 19, 2):#thats such a shame\n if Math.abs(kstart[4] - kstart[5]) == 1:\n for kstart[6] in range(0, 19, 2):#i was drunk when i tipe it\n if Math.abs(kstart[5] - kstart[6]) == 1:\n for kstart[7] in range(1, 19, 2):#realy realy\n if Math.abs(kstart[6] - kstart[7]) == 1:\n for kstart[8] in range(0, 19, 2):#I swear!\n if Math.abs(kstart[7] - kstart[8]) == 1:\n for kstart[9] in range(1, 19, 2):#nex time i will use recursion\n if Math.abs(kstart[8] - kstart[9]) == 1:\n tempSumm = 0\n for y in range(0, 9):\n tempSumm = tempSumm + goldMap[y][kstart[y]]\n if tempSumm>summMax:\n summMax = tempSumm\n kmax = kstart\n\n\n","sub_path":"Glacier/GoldenChoice.py","file_name":"GoldenChoice.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"508933481","text":"import numpy as np\nfrom itertools import izip\nfrom operator import itemgetter\nimport ldig\n\n\nclass LdigDetector(object):\n \"\"\"Standalone detector, based on `server.py:Detector`.\n\n If your text is already normalized, it might be slightly faster to\n initialize with `normalize=False` prior to use the `detect` method, or just\n use the `_detect_normalize` or `_detect` methods directly.\n\n \"\"\"\n def __init__(self, modeldir, normalize=True):\n self.ldig = ldig.ldig(modeldir)\n self.features = self.ldig.load_features()\n self.trie = self.ldig.load_da()\n self.labels = self.ldig.load_labels()\n self.param = np.load(self.ldig.param)\n if normalize:\n self.detect = self._detect_normalize\n else:\n self.detect = self._detect\n\n def _detect_normalize(self, text):\n _, text, _ = ldig.normalize_text(text)\n return self._detect(text)\n\n def _detect(self, text):\n events = self.trie.extract_features(u\"\\u0001\" + text + u\"\\u0001\")\n _sum = np.zeros(len(self.labels))\n\n for id in sorted(events, key=lambda id: self.features[id][0]):\n phi = self.param[id, ]\n _sum += phi * events[id]\n exp_w = np.exp(_sum - _sum.max())\n prob = exp_w / exp_w.sum()\n\n r = sorted(izip(self.labels, prob), key=itemgetter(1), reverse=True)\n return r\n","sub_path":"ldig/ldig_standalone.py","file_name":"ldig_standalone.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"142267209","text":"from sklearn.neural_network import MLPClassifier\r\nimport numpy as np\r\nimport random\r\nimport math\r\n\r\n#Original Raw data of characters 0-9\r\n#Characters are represented by bits on a 5x6 grid expanded out into an array here \r\nD0 = np.array([0,1,1,1,0,0,1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1,1,1,0]);\r\nD1 = np.array([0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0]);\r\nD2 = np.array([0,1,1,1,0,0,0,0,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,1,1,0]);\r\nD3 = np.array([0,1,1,1,0,0,0,0,1,0,0,1,1,1,0,0,0,0,1,0,0,0,0,1,0,0,1,1,1,0]);\r\nD4 = np.array([0,1,0,1,0,0,1,0,1,0,0,1,1,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0]);\r\nD5 = np.array([0,1,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,1,1,0,0,0,0,1,0,0,1,1,1,0]);\r\nD6 = np.array([0,1,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,1,1,0,0,1,0,1,0,0,1,1,1,0]);\r\nD7 = np.array([0,1,1,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0]);\r\nD8 = np.array([0,1,1,1,0,0,1,0,1,0,0,1,1,1,0,0,1,1,1,0,0,1,0,1,0,0,1,1,1,0]);\r\nD9 = np.array([0,1,1,1,0,0,1,0,1,0,0,1,1,1,0,0,0,0,1,0,0,0,0,1,0,0,1,1,1,0]);\r\n\r\n#DX_N is the array for the X character with N bits flipped\r\nD0_1 = D0; D0_5 = D0; D0_10 = D0; D0_15 = D0;\r\nD1_1 = D1; D1_5 = D1; D1_10 = D1; D1_15 = D1;\r\nD2_1 = D2; D2_5 = D2; D2_10 = D2; D2_15 = D2;\r\nD3_1 = D3; D3_5 = D3; D3_10 = D3; D3_15 = D3;\r\nD4_1 = D4; D4_5 = D4; D4_10 = D4; D4_15 = D4;\r\nD5_1 = D5; D5_5 = D5; D5_10 = D5; D5_15 = D5;\r\nD6_1 = D6; D6_5 = D6; D6_10 = D6; D6_15 = D6;\r\nD7_1 = D7; D7_5 = D7; D7_10 = D7; D7_15 = D7;\r\nD8_1 = D8; D8_5 = D8; D8_10 = D8; D8_15 = D8;\r\nD9_1 = D9; D9_5 = D9; D9_10 = D9; D9_15 = D9;\r\n\r\n#Grouping characters 0-9 together into input arrays for training\r\n#NX will contain characters 0-9 with X bits flipped at random\r\nN0 = np.array([D0_1,D1_1,D2_1,D3_1,D4_1,D5_1,D6_1,D7_1,D8_1,D9_1]);\r\nN1 = np.array([D0_1,D1_1,D2_1,D3_1,D4_1,D5_1,D6_1,D7_1,D8_1,D9_1]);\r\nN5 = np.array([D0_5,D1_5,D2_5,D3_5,D4_5,D5_5,D6_5,D7_5,D8_5,D9_5]);\r\nN10 = np.array([D0_10,D1_10,D2_10,D3_10,D4_10,D5_10,D6_10,D7_10,D8_10,D9_10]);\r\nN15 = np.array([D0_15,D1_15,D2_15,D3_15,D4_15,D5_15,D6_15,D7_15,D8_15,D9_15]);\r\n\r\ntags = np.array([0,1,2,3,4,5,6,7,8,9]);\r\n\r\n#Takes array and number of bits to flip\r\ndef addNoise(A, n):\r\n for x in A:\r\n for i in range(0,n):\r\n index = random.randint(0,len(x)-1); \r\n y = x[index];\r\n #Y is assumed either 1 or zero \r\n if y == 1:\r\n y = 0;\r\n else:\r\n y = 1;\r\n x[index] = y;\r\n \r\n#Adding noise to our data\r\naddNoise(N1, 1);\r\naddNoise(N5, 5);\r\naddNoise(N10, 10);\r\naddNoise(N15, 15);\r\n\r\n#\"Pixels\" per character\r\n#Each pixel is an input for our model\r\npSize = 6 * 5;\r\n\r\n#Size of tuple determines how many hidden layers we use, \r\n#interger value represents neurons in that layer\r\nHLTuple = (pSize);\r\n\r\n#% missed error of classifier \r\ndef error(pred, real):\r\n e = 0;\r\n for i in range(0, len(pred)-1):\r\n if pred[i] != real[i]:\r\n e = e + 1\r\n return e/len(pred)\r\n\r\n#Initialize our MLP \r\nM = MLPClassifier(solver='lbfgs', hidden_layer_sizes=HLTuple);\r\n\r\n#1.) With dataset_0 for the training, dataset_5 for the test, and one hidden layer\r\nM.fit(N0, tags);\r\nprint(error(M.predict(N5), tags));\r\n \r\n#2.) With dataset_1 for the training, dataset_5 for the test, and one hidden layer\r\nM.fit(N1, tags);\r\nprint(error(M.predict(N5), tags));\r\n\r\n#3.) With dataset_5 for the training, dataset_5 for the test, and one hidden layer\r\nM.fit(N5, tags);\r\nprint(error(M.predict(N5), tags));\r\n\r\n#4.) 
With dataset_10 for the training, dataset_5 for the test, and one hidden layer\r\nM.fit(N10, tags);\r\nprint(error(M.predict(N5), tags));\r\n\r\n#5.) With dataset_15 for the training, dataset_5 for the test, and one hidden layer\r\nM.fit(N15, tags);\r\nprint(error(M.predict(N5), tags));\r\n\r\n#Adding a hidden layer for our two hidden layer cases\r\nHLTuple = (pSize, pSize);\r\n\r\n#6.) With dataset_0 for the training, dataset_5 for the test, and two hidden layer\r\nM.fit(N0, tags);\r\nprint(error(M.predict(N5), tags));\r\n\r\n#7.) With dataset_1 for the training, dataset_5 for the test, and two hidden layer\r\nM.fit(N1, tags);\r\nprint(error(M.predict(N5), tags));\r\n\r\n#8.) With dataset_5 for the training, dataset_5 for the test, and two hidden layer\r\nM.fit(N5, tags);\r\nprint(error(M.predict(N5), tags));\r\n\r\n#9.) With dataset_10 for the training, dataset_5 for the test, and two hidden layer\r\nM.fit(N10, tags);\r\nprint(error(M.predict(N5), tags));\r\n\r\n#10.) With dataset_15 for the training, dataset_5 for the test, and two hidden layer\r\nM.fit(N15, tags);\r\nprint(error(M.predict(N5), tags));\r\n","sub_path":"NeuralNetExample.py","file_name":"NeuralNetExample.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"536225548","text":"from __future__ import print_function\nfrom __future__ import absolute_import\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom chainer import serializers\nfrom dqn import environment, network\nimport random\nimport numpy as np\n\nname = 'main7'\nnum = '45'\nmodel_path = 'result/{}/network/ae_model{}.npz'.format(name, num)\n\nenv = environment.Environment('test')\nmodel = network.AEC4()\nserializers.load_npz(model_path, model)\n\nx = env.reset()\nfor i in range(25):\n x, _, _, _ = env.step(random.choice([0, 1, 2, 3]))\n\nx = np.array(x).astype(np.float32) / 255.0\nx = np.expand_dims(x, axis=0)\nx_ = model(x, test=True)\nx = (255.0 * x).astype(np.int8)\nx_ = (255.0 * x_.data).astype(np.int8)\n\nplt.figure()\nfor i in range(4):\n plt.subplot(2, 4, i+1)\n plt.gray()\n plt.imshow(x[0, i])\n plt.subplot(2, 4, i+5)\n plt.gray()\n plt.imshow(x_[0, i])\nplt.savefig('ae.png')\n","sub_path":"pong2/evaluate/ae_plot.py","file_name":"ae_plot.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"28927012","text":"from PIL import Image\nimport glob\n\ndef resize(file, basewidth):\n img = Image.open(file)\n wpercent = (basewidth/float(img.size[0]))\n hsize = int((float(img.size[1])*float(wpercent)))\n img = img.resize((basewidth,hsize), Image.ANTIALIAS)\n img.save(file)\n print(\"done\")\n\ndef resizeFolder(folder, baseWidth):\n for file in glob.glob(folder):\n resize(file, baseWidth)\n print(\"done all\")\n\n\n\n\n","sub_path":"out/production/Pokemon Crossing/Assets/resizer.py","file_name":"resizer.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"27998514","text":"import requests\nimport json\n\nclient_id = 'f2bafb758b91c49d329a'\nclient_secret = 'e5ee2cc4910c763e2ad4b97ea54addf8'\n\n# инициируем запрос на получение токена\nr = requests.post(\"https://api.artsy.net/api/tokens/xapp_token\",\n data={\n \"client_id\": client_id,\n \"client_secret\": client_secret\n })\n\n# разбираем ответ сервера\nj = json.loads(r.text)\n\n# достаем токен\ntoken = j[\"token\"]\n\n\n# создаем заголовок, содержащий наш токен\nheaders = {\"X-Xapp-Token\" : token}\n# инициируем запрос с заголовком\n\ndict_artists=[]\nwith open(\"dataset_24476_4.txt\") as f:\n for artist_id in f.read().splitlines():\n r = requests.get(\"https://api.artsy.net/api/artists/\"+artist_id, headers=headers)\n j = json.loads(r.text)\n dict_artists.append({'sortable_name':j['sortable_name'],\"birthday\":j[\"birthday\"]})\n\n dict_artists.sort(key=lambda x:(x[\"birthday\"], x['sortable_name']))\n for a in dict_artists:\n print(a[\"sortable_name\"])","sub_path":"work with art api.py","file_name":"work with art api.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"364924180","text":"from rest_framework.serializers import ModelSerializer\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom data_sources.models import Gateway, GatewayStatus, GatewayTag, Posse\n\n\nclass PosseSerializer(ModelSerializer):\n class Meta:\n model = Posse\n fields = '__all__'\n\n\nclass GatewayTagSerializer(ModelSerializer):\n class Meta:\n model = GatewayTag\n fields = '__all__'\n validators = (\n UniqueTogetherValidator(\n queryset=GatewayTag.objects.all(),\n fields=('gateway', 'label'),\n message='Uniqueness Breached: gateway and label should be'\n 'unique in GatewayTag.',\n ),\n UniqueTogetherValidator(\n queryset=GatewayTag.objects.all(),\n fields=('gateway', 'hardware_name'),\n message='Uniqueness Breached: gateway and hardware_name'\n 'should be unique in GatewayTag.',\n ),\n )\n\n\nclass GatewaySerializer(ModelSerializer):\n tags = GatewayTagSerializer(many=True, read_only=True)\n\n class Meta:\n model = Gateway\n fields = (\n 'id',\n 'created_at',\n 'updated_at',\n 'label',\n 'location',\n 'oauth2_client_id',\n 'serial_number',\n 'posse',\n 'queue_name',\n 'data_flow',\n 'tags',\n )\n read_only_fields = ('queue_name', 'data_flow', 'tags')\n validators = (UniqueTogetherValidator(\n queryset=Gateway.objects.all(),\n fields=('label', 'serial_number'),\n message='Uniqueness Breached: label and serial number should be'\n 'unique in Gateway.',\n ), )\n\n\nclass GatewayStatusSerializer(ModelSerializer):\n class Meta:\n model = GatewayStatus\n fields = '__all__'\n","sub_path":"data_sources/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"68452961","text":"import config\r\nimport telebot\r\nfrom telebot import types\r\nimport firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import db\r\n\r\nbot = telebot.TeleBot(config.token) #должно быть в начале. Вызывает токен\r\ncred = credentials.Certificate(\"/home/kokotoverta/verta/key.json\")\r\nfirebase_admin.initialize_app(cred, {\r\n 'databaseURL': 'https://vertabot.firebaseio.com/' })\r\n\r\n#result = db.reference('/bot/answers/1/email') #указываете ключ, значение которого хотите получить.\r\n#print(result.get())\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef start_message(message):\r\n user_id = message.from_user.id\r\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\r\n user_markup.row(\"Для себя\", \"Для бизнеса\")\r\n db.reference(\"/bot/users/\"+str(user_id)).update({\"current\": \"category\"})\r\n bot.send_message(user_id,\"Отлично! Я тебя ждал. Для чего тебя нужен бот?\",reply_markup = user_markup)\r\n \r\n@bot.message_handler(content_types = ['text', 'contact'])\r\ndef start_dialog(message):\r\n user_id = message.from_user.id\r\n current = db.reference(\"/bot/users/\"+str(user_id)+\"/current\").get()\r\n print(current)\r\n if current == \"category\":\r\n if message.text == \"Для себя\":\r\n bot.send_message(user_id,\"Окей, круто! А что ваш бот должен уметь делать?\")\r\n db.reference(\"/bot/users/\"+str(user_id)).update({\"category\": \"self\"})\r\n \r\n db.reference(\"/bot/users/\"+str(user_id)).update({\"current\": \"ability\"})\r\n elif message.text == \"Для бизнеса\":\r\n bot.send_message(message.from_user.id,\"Чат-бот для бизнеса очень важен. Что он должен уметь делать?\")\r\n db.reference(\"/bot/users/\"+str(user_id)).update({\"category\": \"business\"})\r\n \r\n db.reference(\"/bot/users/\"+str(user_id)).update({\"current\": \"ability\"})\r\n else:\r\n bot.send_message(message.from_user.id,\"Ошибка! Выберите из перечисленных.\")\r\n elif current == \"ability\":\r\n db.reference(\"/bot/users/\"+str(user_id)).update({\"ability\": message.text})\r\n key = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n contact_key = telebot.types.KeyboardButton(text=\"Отправить свой номер\", request_contact=True)\r\n key.add(contact_key)\r\n bot.send_message(message.from_user.id,\"Замечательно! Отправь мне свои контакты, чтобы мы смогли с тобой связаться.\", reply_markup = key)\r\n db.reference(\"/bot/users/\"+str(user_id)).update({\"current\": \"contacts\"})\r\n elif current == \"contacts\":\r\n category = db.reference(\"/bot/users/\"+str(user_id)+ str(\"/category\")).get()\r\n ability = db.reference(\"/bot/users/\"+str(user_id)+ \"/ability\").get()\r\n print(message)\r\n user_name = message.from_user.username\r\n phone_number = message.contact.phone_number\r\n first_name = message.contact.first_name\r\n phone = db.reference(\"/bot/users/\"+str(user_id)).update({\"phone\": phone_number})\r\n user = db.reference(\"/bot/users/\"+str(user_id)).update({\"user_name\": user_name})\r\n name = db.reference(\"/bot/users/\"+str(user_id)).update({\"name\": first_name})\r\n \r\n bot.send_message(message.from_user.id,\"Отлично! 
В скором времени мы с тобой свяжемся :) Если хочешь можешь вернуться на наш сайт.\")\r\n inline = types.InlineKeyboardMarkup()\r\n url_button = types.InlineKeyboardButton(text = \"reVerta\", url = \"https://www.reverta.online/\")\r\n inline.add(url_button)\r\n bot.send_message(message.from_user.id,\"Ссылка на сайт\", reply_markup = inline)\r\n \r\n bot.send_message(\"337465823\", \"От кого: \" + str(first_name) + \" @\" + str(user_name) + \"\\nНомер: \" + str(phone_number) + \"\\nДля чего: \" + category + \"\\nЧто он должен уметь: \" + ability)\r\n bot.send_message(\"342420058\", \"От кого: \" + str(first_name) + \" @\" + str(user_name) + \"\\nНомер: \" + str(phone_number) + \"\\nДля чего: \" + category + \"\\nЧто он должен уметь: \" + ability)\r\n \r\n\r\nbot.polling(none_stop=True, interval = 2) #в конце. Бесконечно.\r\n\r\n\r\n","sub_path":"vertabot.py","file_name":"vertabot.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"263458735","text":"#!/usr/bin/env python\nfrom itertools import product, count, islice\nfrom math import sqrt\n\ndef rebase(n, b):\n total = 0\n for i, digit in enumerate(reversed(list(str(n)))):\n total += (b**i)*int(digit)\n return total\n\n\ndef notPrime(n):\n for number in islice(count(2), int(sqrt(n) - 1)):\n if not n % number:\n return number\n # Looking at a million numbers is good enough...\n if number > 1000000:\n break\n return False\n\n\ndef list_coins(n, j):\n coin_list = []\n for inner_coin in product('01', repeat=n-2):\n coin = int('1'+''.join(inner_coin)+'1')\n div_list = []\n for coin_b10 in [rebase(coin, x) for x in range(2, 11)]:\n div = notPrime(coin_b10)\n if not div:\n break\n div_list.append(div)\n if len(div_list) == 9:\n coin_list.append((coin, div_list))\n if len(coin_list) == j:\n break\n return coin_list\n\n# print list_coins(16, 50)\n# print rebase(1001, 10)\n# print notPrime(41)\n\nt = int(raw_input())\nfor i in xrange(1, t + 1):\n print('Case #{}:'.format(i))\n in_n, in_j = (int(x) for x in raw_input().split())\n for coin, divs in list_coins(in_n, in_j):\n print('{} {}'.format(coin, ' '.join(str(x) for x in divs)))\n","sub_path":"codes/CodeJamCrawler/16_0_3/snaggie/coin_jam.py","file_name":"coin_jam.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"464878171","text":"\"\"\"\nsource:\n\nhttps://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/\n\nspt: shortest path tree\n\"\"\"\n\nimport sys\n\n\ndef min_distance(dist, shortest_path_tree):\n \"\"\"\n aka EXTRACT_MIN\n \"\"\"\n min_ = sys.maxsize\n min_index = 0\n\n for v in range(len(dist)):\n if not shortest_path_tree[v] and dist[v] <= min_:\n min_ = dist[v]\n min_index = v\n\n return min_index\n\n\ndef path_length(adjacency_matrix, source, destination):\n shortest_path_tree = [False] * len(adjacency_matrix)\n\n dist = [sys.maxsize] * len(adjacency_matrix)\n dist[source] = 0\n\n # while Q is not empty\n for _ in range(len(dist) - 1):\n\n # do u <- EXTRACT_MIN(Q)\n u = min_distance(dist, shortest_path_tree)\n\n shortest_path_tree[u] = True\n\n # for each vertex (v) in Adj[u]\n # do relax(u, v, w)\n for v in range(len(dist)):\n if shortest_path_tree[v]:\n continue\n\n # not connected\n if not adjacency_matrix[u][v]:\n continue\n\n if dist[u] == sys.maxsize:\n continue\n\n # relax\n if dist[u] + adjacency_matrix[u][v] < dist[v]:\n dist[v] = dist[u] + adjacency_matrix[u][v]\n\n return dist[destination]\n\n","sub_path":"graph/dijkstra/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"4566001","text":"# -*- coding: utf-8 -*-\ntry:\n import torch\nexcept ModuleNotFoundError:\n raise ImportError('missing PyTorch')\n\nfrom copy import deepcopy\nfrom math import ceil\nimport odl\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.optim.lr_scheduler import CyclicLR, OneCycleLR\n\nfrom dival.reconstructors import LearnedReconstructor\nfrom dival.measure import PSNR\n\n\nclass StandardLearnedReconstructor(LearnedReconstructor):\n \"\"\"\n Standard learned reconstructor base class.\n\n Provides a default implementation that only requires subclasses to\n implement :meth:`init_model`.\n\n By default, the Adam optimizer is used. This can be changed by\n reimplementing :meth:`init_optimizer`.\n Also, a OneCycleLR scheduler is used by default, which can be changed by\n reimplementing :meth:`init_scheduler`.\n\n The training implementation selects the best model reached after an integer\n number of epochs based on the validation set.\n\n The hyper parameter ``'normalize_by_opnorm'`` selects whether\n :attr:`op` should be normalized by the operator norm.\n In this case, the inputs to :attr:`model` are divided by the operator norm.\n\n Attributes\n ----------\n model : :class:`torch.nn.Module` or `None`\n The neural network.\n Must be initialized by the subclass :meth:`init_model` implementation.\n non_normed_op : :class:`odl.operator.Operator`\n The original `op` passed to :meth:`__init__`, regardless of\n ``self.hyper_params['normalize_by_opnorm']``.\n See also :attr:`op`.\n \"\"\"\n\n HYPER_PARAMS = deepcopy(LearnedReconstructor.HYPER_PARAMS)\n HYPER_PARAMS.update({\n 'epochs': {\n 'default': 20,\n 'retrain': True\n },\n 'batch_size': {\n 'default': 64,\n 'retrain': True\n },\n 'lr': {\n 'default': 0.01,\n 'retrain': True\n },\n 'normalize_by_opnorm': {\n 'default': False,\n 'retrain': True\n }\n })\n\n def __init__(self, op, hyper_params=None, num_data_loader_workers=8,\n use_cuda=True, show_pbar=True, log_dir=None,\n log_num_validation_samples=0,\n save_best_learned_params_path=None, torch_manual_seed=1,\n **kwargs):\n \"\"\"\n Parameters\n ----------\n op : :class:`odl.operator.Operator`\n Forward operator.\n num_data_loader_workers : int, optional\n Number of parallel workers to use for loading data.\n use_cuda : bool, optional\n Whether to use cuda for the U-Net.\n show_pbar : bool, optional\n Whether to show tqdm progress bars during the epochs.\n log_dir : str, optional\n Tensorboard log directory (name of sub-directory in utils/logs).\n If `None`, no logs are written.\n log_num_valiation_samples : int, optional\n Number of validation images to store in tensorboard logs.\n This option only takes effect if ``log_dir is not None``.\n save_best_learned_params_path : str, optional\n Save best model weights during training under the specified path by\n calling :meth:`save_learned_params`.\n torch_manual_seed : int, optional\n Fixed seed to set by ``torch.manual_seed`` before training.\n The default is `1`. 
It can be set to `None` or `False` to disable\n the manual seed.\n \"\"\"\n super().__init__(reco_space=op.domain,\n observation_space=op.range,\n hyper_params=hyper_params, **kwargs)\n self.non_normed_op = op\n self.num_data_loader_workers = num_data_loader_workers\n self.use_cuda = use_cuda\n self.show_pbar = show_pbar\n self.log_dir = log_dir\n self.log_num_validation_samples = log_num_validation_samples\n self.save_best_learned_params_path = save_best_learned_params_path\n self.torch_manual_seed = torch_manual_seed\n self.model = None\n\n self._opnorm = None\n\n self.device = (torch.device('cuda:0')\n if self.use_cuda and torch.cuda.is_available() else\n torch.device('cpu'))\n\n @property\n def opnorm(self):\n if self._opnorm is None:\n self._opnorm = odl.power_method_opnorm(self.non_normed_op)\n return self._opnorm\n\n @property\n def op(self):\n \"\"\"\n :class:`odl.operator.Operator` :\n The forward operator, normalized if\n ``self.hyper_params['normalize_by_opnorm']`` is ``True``.\n \"\"\"\n if self.normalize_by_opnorm:\n return (1./self.opnorm) * self.non_normed_op\n return self.non_normed_op\n\n def eval(self, test_data):\n self.model.eval()\n\n running_psnr = 0.0\n with tqdm(test_data, desc='test ',\n disable=not self.show_pbar) as pbar:\n for obs, gt in pbar:\n rec = self.reconstruct(obs)\n running_psnr += PSNR(rec, gt)\n\n return running_psnr / len(test_data)\n\n def train(self, dataset):\n if self.torch_manual_seed:\n torch.random.manual_seed(self.torch_manual_seed)\n # create PyTorch datasets\n dataset_train = dataset.create_torch_dataset(\n part='train', reshape=((1,) + dataset.space[0].shape,\n (1,) + dataset.space[1].shape))\n\n dataset_validation = dataset.create_torch_dataset(\n part='validation', reshape=((1,) + dataset.space[0].shape,\n (1,) + dataset.space[1].shape))\n\n # reset model before training\n self.init_model()\n\n criterion = torch.nn.MSELoss()\n self.init_optimizer(dataset_train=dataset_train)\n\n # create PyTorch dataloaders\n data_loaders = {'train': DataLoader(\n dataset_train, batch_size=self.batch_size,\n num_workers=self.num_data_loader_workers, shuffle=True,\n pin_memory=True),\n 'validation': DataLoader(\n dataset_validation, batch_size=self.batch_size,\n num_workers=self.num_data_loader_workers,\n shuffle=True, pin_memory=True)}\n\n dataset_sizes = {'train': len(dataset_train),\n 'validation': len(dataset_validation)}\n\n self.init_scheduler(dataset_train=dataset_train)\n if self.scheduler is not None:\n schedule_every_batch = isinstance(\n self.scheduler, (CyclicLR, OneCycleLR))\n\n best_model_wts = deepcopy(self.model.state_dict())\n best_psnr = 0\n\n if self.log_dir is not None:\n writer = SummaryWriter(log_dir=self.log_dir, max_queue=0)\n validation_samples = dataset.get_data_pairs(\n 'validation', self.log_num_validation_samples)\n\n self.model.to(self.device)\n self.model.train()\n\n for epoch in range(self.epochs):\n # Each epoch has a training and validation phase\n for phase in ['train', 'validation']:\n if phase == 'train':\n self.model.train() # Set model to training mode\n else:\n self.model.eval() # Set model to evaluate mode\n\n running_psnr = 0.0\n running_loss = 0.0\n running_size = 0\n with tqdm(data_loaders[phase],\n desc='epoch {:d}'.format(epoch + 1),\n disable=not self.show_pbar) as pbar:\n for inputs, labels in pbar:\n if self.normalize_by_opnorm:\n inputs = (1./self.opnorm) * inputs\n inputs = inputs.to(self.device)\n labels = labels.to(self.device)\n\n # zero the parameter gradients\n self.optimizer.zero_grad()\n\n # 
forward\n # track gradients only if in train phase\n with torch.set_grad_enabled(phase == 'train'):\n outputs = self.model(inputs)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), max_norm=1)\n self.optimizer.step()\n if (self.scheduler is not None and\n schedule_every_batch):\n self.scheduler.step()\n\n for i in range(outputs.shape[0]):\n labels_ = labels[i, 0].detach().cpu().numpy()\n outputs_ = outputs[i, 0].detach().cpu().numpy()\n running_psnr += PSNR(outputs_, labels_)\n\n # statistics\n running_loss += loss.item() * outputs.shape[0]\n running_size += outputs.shape[0]\n\n pbar.set_postfix({'phase': phase,\n 'loss': running_loss/running_size,\n 'psnr': running_psnr/running_size})\n if self.log_dir is not None and phase == 'train':\n step = (epoch * ceil(dataset_sizes['train']\n / self.batch_size)\n + ceil(running_size / self.batch_size))\n writer.add_scalar('loss/{}'.format(phase),\n torch.tensor(running_loss/running_size), step)\n writer.add_scalar('psnr/{}'.format(phase),\n torch.tensor(running_psnr/running_size), step)\n\n if self.scheduler is not None and not schedule_every_batch:\n self.scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_psnr = running_psnr / dataset_sizes[phase]\n\n if self.log_dir is not None and phase == 'validation':\n step = (epoch+1) * ceil(dataset_sizes['train']\n / self.batch_size)\n writer.add_scalar('loss/{}'.format(phase),\n epoch_loss, step)\n writer.add_scalar('psnr/{}'.format(phase),\n epoch_psnr, step)\n\n # deep copy the model (if it is the best one seen so far)\n if phase == 'validation' and epoch_psnr > best_psnr:\n best_psnr = epoch_psnr\n best_model_wts = deepcopy(self.model.state_dict())\n if self.save_best_learned_params_path is not None:\n self.save_learned_params(\n self.save_best_learned_params_path)\n\n if (phase == 'validation' and self.log_dir is not None and\n self.log_num_validation_samples > 0):\n with torch.no_grad():\n val_images = []\n for (y, x) in validation_samples:\n y = torch.from_numpy(\n np.asarray(y))[None, None].to(self.device)\n x = torch.from_numpy(\n np.asarray(x))[None, None].to(self.device)\n reco = self.model(y)\n reco -= torch.min(reco)\n reco /= torch.max(reco)\n val_images += [reco, x]\n writer.add_images(\n 'validation_samples', torch.cat(val_images),\n (epoch + 1) * (ceil(dataset_sizes['train'] /\n self.batch_size)),\n dataformats='NCWH')\n\n print('Best val psnr: {:4f}'.format(best_psnr))\n self.model.load_state_dict(best_model_wts)\n\n def init_model(self):\n \"\"\"\n Initialize :attr:`model`.\n Called in :meth:`train` at the beginning.\n \"\"\"\n raise NotImplementedError\n\n def init_optimizer(self, dataset_train):\n \"\"\"\n Initialize the optimizer.\n Called in :meth:`train`, after calling :meth:`init_model` and before\n calling :meth:`init_scheduler`.\n\n Parameters\n ----------\n dataset_train : :class:`torch.utils.data.Dataset`\n The training (torch) dataset constructed in :meth:`train`.\n \"\"\"\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def init_scheduler(self, dataset_train):\n \"\"\"\n Initialize the learning rate scheduler.\n Called in :meth:`train`, after calling :meth:`init_optimizer`.\n\n Parameters\n ----------\n dataset_train : :class:`torch.utils.data.Dataset`\n The training (torch) dataset constructed in :meth:`train`.\n \"\"\"\n self.scheduler = torch.optim.lr_scheduler.OneCycleLR(\n 
self.optimizer, max_lr=self.lr,\n steps_per_epoch=ceil(len(dataset_train) / self.batch_size),\n epochs=self.epochs)\n\n def _reconstruct(self, observation):\n self.model.eval()\n with torch.set_grad_enabled(False):\n obs_tensor = torch.from_numpy(\n np.asarray(observation)[None, None])\n if self.normalize_by_opnorm:\n obs_tensor = obs_tensor / self.opnorm\n obs_tensor = obs_tensor.to(self.device)\n reco_tensor = self.model(obs_tensor)\n reconstruction = reco_tensor.cpu().detach().numpy()[0, 0]\n return self.reco_space.element(reconstruction)\n\n def save_learned_params(self, path):\n path = path if path.endswith('.pt') else path + '.pt'\n torch.save(self.model.state_dict(), path)\n\n def load_learned_params(self, path, force_parallel=False):\n path = path if path.endswith('.pt') else path + '.pt'\n self.init_model()\n map_location = ('cuda:0' if self.use_cuda and torch.cuda.is_available()\n else 'cpu')\n state_dict = torch.load(path, map_location=map_location)\n\n # backwards-compatibility with non-data_parallel weights\n data_parallel = list(state_dict.keys())[0].startswith('module.')\n if force_parallel and not data_parallel:\n state_dict = {('module.' + k): v for k, v in state_dict.items()}\n self.model.load_state_dict(state_dict)\n","sub_path":"dival/reconstructors/standard_learned_reconstructor.py","file_name":"standard_learned_reconstructor.py","file_ext":"py","file_size_in_byte":15042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
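+Since the base class only requires subclasses to implement init_model(), a minimal subclass can look like the sketch below; the two-layer network is a hypothetical placeholder, not a model shipped with dival:
+
+    import torch
+    from dival.reconstructors.standard_learned_reconstructor import (
+        StandardLearnedReconstructor)
+
+    class TinyLearnedReconstructor(StandardLearnedReconstructor):
+        def init_model(self):
+            # train() moves the model to self.device after this is called
+            self.model = torch.nn.Sequential(
+                torch.nn.Conv2d(1, 16, 3, padding=1),
+                torch.nn.ReLU(),
+                torch.nn.Conv2d(16, 1, 3, padding=1))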
+{"seq_id":"400250502","text":"import cv2\nimport numpy as np\nimport yaml\n\n\nclass Tracker:\n def __init__(self, tracker_config, game_config):\n self.tracker_config = self.read_config(tracker_config)\n if game_config is not None:\n self.game_config = self.read_config(game_config)\n else:\n self.game_config = None\n\n @staticmethod\n def read_config(config_path):\n with open(config_path, \"r\", encoding=\"utf-8\") as f:\n return yaml.safe_load(f)\n\n def undistort(self, img):\n\n c = self.tracker_config['camera']\n # Camera parameters\n k1 = c['k1']\n k2 = c['k2']\n k3 = c['k3']\n p1 = c['p1']\n p2 = c['p2']\n fx = c['fx']\n fy = c['fy']\n cx = c['cx']\n cy = c['cy']\n\n dist = np.array([k1, k2, p1, p2, k3])\n mtx = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])\n\n # Setting the params\n h, w = img.shape[:2]\n new_camera_tx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))\n\n # Undistort\n map_x, map_y = cv2.initUndistortRectifyMap(mtx, dist, None, new_camera_tx, (w, h), 5)\n dst = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)\n\n # Crop the image\n x, y, w, h = roi\n dst = dst[y:y + h, x:x + w]\n return dst\n\n def move_origin(self, x, y, transformation_matrix: np.ndarray):\n \"\"\"Translates coordinate to new coordinate system and applies scaling to get units in ~mm.\n Args:\n x (int): x coordinate\n y (int): y coordinate\n transformation_matrix: game field object\n Returns:\n Tuple[int, int]: Corrected coordinates\n \"\"\"\n # Translate coordinates if new origin exists (top left corner of map)\n # if len(map.fieldCorners) == 12:\n\n s_point = np.array([np.array([[x, y]], np.float32)])\n d_point = cv2.perspectiveTransform(s_point, transformation_matrix)\n x = d_point[0][0][0]\n y = d_point[0][0][1]\n return int(round(x)), int(round(y))\n\n # def reverseCorrect(self, x, y, map):\n # \"\"\"Reverses the correction of the coordinates.\n # Scale0 and scale1 define scaling constants. The scaling factor is a linear function of distance from center.\n # Args:\n # x (int): x coordinate\n # y (int): y coordinate\n # map (ResMap) : map object\n # Returns:\n # Tuple[int, int]: Reverted coordinates\n # \"\"\"\n #\n # # Scaling factors\n # scale0 = self.cameraConfig.scale0\n # scale1 = self.cameraConfig.scale1\n #\n # # Convert screen coordinates to 0-based coordinates\n # offset_x = map.imageWidth / 2\n # offset_y = map.imageHeighth / 2\n #\n # # Calculate distance from center\n # dist = np.sqrt((x - offset_x) ** 2 + (y - offset_y) ** 2)\n #\n # # Find the distance before correction\n # distOld = (-scale0 + np.sqrt(scale0 ** 2 + 4 * dist * scale1)) / (2 * scale1)\n #\n # # Revert coordinates and return\n # return (int(round((x - offset_x) / (scale0 + scale1 * distOld) + offset_x)),\n # int(round((y - offset_y) / (scale0 + scale1 * distOld) + offset_y)))\n","sub_path":"sledilnik/Tracker.py","file_name":"Tracker.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"33952045","text":"\nfrom heap import *\n\ndef running_median(seq):\n\n res = []\n\n left = MaxHeap()\n right = MinHeap()\n\n right.push(next(seq, None))\n res.append(right.root)\n\n for v in seq:\n\n if v < right.root:\n left.push(v)\n else:\n right.push(v)\n\n if len(left) > len(right) + 1:\n val = left.pop()\n right.push(val)\n\n elif len(right) > len(left) + 1:\n val = right.pop()\n left.push(val)\n\n if len(left) == len(right):\n median = (left.root + right.root) / 2\n elif len(left) > len(right):\n median = left.root\n\n elif len(right) > len(left):\n median = right.root\n \n res.append(median)\n print('max_heap:', left)\n print('min_heap:', right)\n\n \n return res\n ","sub_path":"#33 Running Median/running_median.py","file_name":"running_median.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"485237436","text":"# -*- coding: utf-8 -*-\n#package for download pictures and conver images\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport json\nimport os\nfrom urllib.request import urlopen\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom wand.image import Image \nimport os\n\nclass download(object):\n def __init__(self):\n self.result = ''\n \n def download_pic(self, path, terms, helpterm = ''):\n\n browser = webdriver.Chrome()\n\n for searchterm in terms:\n \n combineterm = searchterm + ' ' + helpterm\n url = \"https://www.google.co.in/search?q=\" + combineterm + \"&source=lnms&tbm=isch\"\n #chrome_options = Options()\n #chrome_options.add_argument(\"user-data-dir=/Users/yanchunyang/Library/Application Support/Google/Chrome/Default\")\n #browser = webdriver.Chrome()\n browser.get(url)\n header={'User-Agent':\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"}\n counter = 0\n succounter = 0\n\n '''\n if not os.path.exists(searchterm):\n os.mkdir(searchterm)\n '''\n \n\n '''\n for _ in range(2):\n browser.execute_script(\"window.scrollBy(0,100)\")\n #browser.execute_script(\"window.scrollBy(0,10000)\")\n '''\n for x in browser.find_elements_by_xpath(\"//div[@class='rg_meta']\"):\n counter = counter + 1\n if counter > 15:\n break\n print(\"Total Count:\"+str(counter))\n print(\"Succsessful Count:\"+str(succounter))\n print (\"URL:\"+json.loads(x.get_attribute('innerHTML'))[\"ou\"])\n\n img = json.loads(x.get_attribute('innerHTML'))[\"ou\"]\n imgtype = json.loads(x.get_attribute('innerHTML'))[\"ity\"]\n try:\n req = urlopen(img)\n output = open(os.path.join(path , searchterm + \"_\" + str(counter) + \".\" + imgtype), \"wb\")\n output.write(req.read())\n output.close()\n succounter = succounter + 1\n print(os.path.join(path , searchterm + \"_\" + str(counter) + \".\" + imgtype))\n except:\n print(\"can't get img\")\n\n print(str(succounter)+\"pictures succesfully downloaded\")\n browser.close()\n\n def convert_img(self, path):\n\n for filename in os.listdir(path):\n index = filename.find('.')\n if len(filename) == index + 1:\n os.rename(os.path.join(path,filename), os.path.join(path,filename+'jpg'))\n\n\n for filename in os.listdir(path):\n index = filename.find('.')\n if filename[index+1:] in ('jpeg', 'jpg', 'png') and 'thumbnail' not in filename:\n try:\n with Image(filename = os.path.join(path, filename)) as img:\n if img.size[0] > 400 and img.size[1] > 400:\n img.resize(60, 60)\n tmp =filename[0:index] + '_' +'thumbnail' + '.'+filename[index+1:]\n img.save(filename = os.path.join(path, tmp))\n except:\n pass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"dppk.py","file_name":"dppk.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"549694751","text":"import json\nimport requests, asyncio, re\nfrom telethon import events\nfrom .. import jdbot, chat_id, _ConfigDir\n\n\n\n\n# 获取京喜工厂团ID\n@jdbot.on(events.NewMessage(from_users=chat_id, pattern='^/tuan'))\nasync def tuan(event):\n #do something\n msg = await jdbot.send_message(chat_id,\"开始更新京喜工厂组团ID\")\n url = 'https://cdn.jsdelivr.net/gh/gitupdate/updateTeam@master/shareCodes/jd_updateFactoryTuanId.json'\n id = ''\n i = 0\n while True:\n logrequest = requests.get(url)\n if logrequest.status_code == requests.codes.ok:\n id = logrequest.json().get('tuanActiveId')\n with open(f\"{_ConfigDir}/config.sh\", 'r', encoding='utf-8') as f1:\n configs = f1.read()\n f1.close()\n await asyncio.sleep(1.5)\n if configs.find(f\"export TUAN_ACTIVEID=\") != -1:\n configs = re.sub(f'TUAN_ACTIVEID=(\\\"|\\').*(\\\"|\\')', f'TUAN_ACTIVEID=\"{id}\"', configs)\n \n with open(f\"{_ConfigDir}/config.sh\", 'w', encoding='utf-8') as f2:\n f2.write(configs)\n f2.close()\n end = \"替换京喜工厂团ID成功\"\n break \n else:\n await asyncio.sleep(1)\n i = i + 1\n if i > 5:\n end = \"获取京喜工厂团ID失败!请重试。\"\n break\n \n #await jdbot.delete_messages(chat_id, msg)\n await jdbot.send_message(chat_id,end)\n","sub_path":"jbot/tuan.py","file_name":"tuan.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"588855977","text":"import os\nimport numpy as np\nimport pickle as pkl\n\n\ndef read_onoff(night, ref=None, big=False, commercial=False):\n\n # -- defaults\n if ref==None:\n ref = night\n\n if commercial:\n comm = '_comm'\n else:\n comm = ''\n\n\n # -- utilities\n if big:\n oofile = 'big_onoff_'+str(night).zfill(2)+'_'+str(ref).zfill(2)+ \\\n comm+'.pkl'\n else:\n oofile = 'ind_onoff_night_'+str(night).zfill(2)+'.pkl'\n\n infile = os.path.join(os.environ['DST_WRITE'],oofile)\n\n\n # -- read in the lists\n fopen = open(infile,'rb')\n onoff = pkl.load(fopen)\n fopen.close()\n\n\n # -- return\n if big:\n indices = np.array([i for i,j,k in onoff])\n bigons = np.array([j for i,j,k in onoff])\n bigoffs = np.array([abs(k) for i,j,k in onoff])\n\n return indices, bigons, bigoffs\n else:\n return onoff\n","sub_path":"py/dst_read_onoff.py","file_name":"dst_read_onoff.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"39245574","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\n文件名称:CopyPasteShapeArrowTestCase.py\n作者:ycy\n版本:PPTPro\n创建时间:2019/3/7 16:47\n修改时间:\n软件:PyCharm\n\"\"\"\nfrom script.windows.Operation import *\nfrom script.windows.SystemDialog import SystemDiaglog\nfrom script.windows.PPT3DTestCase.Action import Action\nfrom script.windows.PPT3DSetting.SourcePath import SourcePath\n\nreload(sys)\nsys.setdefaultencoding('UTF-8') # 将脚本编码格式转化未置顶的编码格式\n\n\nclass CopyPasteShapeArrowTestCase(Action, Operation, SystemDiaglog):\n '''复制粘贴形状Arrow'''\n\n def test_main(self):\n '''复制粘贴形状Arrow'''\n self.OperationSetting()\n self.Init3DPPT()\n self.SetTag(\"形状Arrow复制粘贴\", time.time())\n\n self.OneClick(\"BtnInsert\")\n self.OneClick(\"BtnInsertShape\")\n self.OneClick(\"BtnShape_Arrow\")\n time.sleep(1)\n self.Click_XY()\n \n tag = (self.__class__.__doc__ or u\"测试\") + \"_\" + self.__class__.__name__ \n self.startScene(tag)\n \n self.CopyPasteImage(\"ItemShape2D\", \"Arrow复制形状失败\", num=2, dx=30, dy=30)\n time.sleep(2)\n\n self.endScene(tag)\n time.sleep(1)\n self.EndTag()","sub_path":"U3DAutomatorClient/script/windows/PPT3DTestCase/FirstStageTestCase/CopyPasteShapeArrowTestCase.py","file_name":"CopyPasteShapeArrowTestCase.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"591676106","text":"# -*- coding: utf-8 -*-\n# @Author : lun\n\nimport yaml\nimport os\n\nclass Yaml:\n def __init__(self, yamlf):\n if os.path.exists(yamlf):\n self.yamlf = yamlf\n else:\n raise FileNotFoundError(\"文件不存在\")\n self._date = None\n\n def read_data(self):\n if not self._date:\n with open(self.yamlf, \"rb\") as f:\n self._date = yaml.safe_load(f)\n return self._date\n\nif __name__ == \"__main__\":\n MyYml = Yaml(\"calc_data.yml\")\n print(MyYml.read_data())\n","sub_path":"test_pytest/YamlUtil.py","file_name":"YamlUtil.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"58375075","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nRNN模型数据处理\n\"\"\"\n\nimport pickle\nimport os\nimport numpy as np\nimport pandas as pd\nimport logging\nfrom difflib import Differ\nfrom collections import Counter\nfrom collections import OrderedDict\n\n# 配置调试\nlogging.basicConfig(level=logging.DEBUG, format=\"[%(threadName)s]: %(message)s\")\n\n\nclass Processing2RNN:\n \"\"\"\n 转换RNN模型数据\n \"\"\"\n\n def __init__(self):\n # 中间信息打印频率\n self.print_freq = 1\n # 序列长度\n self.seq_len = 100\n # 记录的所有动作数 主牌动作309个 + START,SEP,NONE(用于填充)\n self.num_record_mcard_action = 312\n # 主牌动作\n self.num_mcard_action = 309\n # 手牌索引\n self.card_index = \"34567890JQKA2wW\"\n # 所有手牌\n self.all_card = \"33334444555566667777888899990000JJJJQQQQKKKKAAAA2222wW\"\n\n # 所有主牌动作\n self.mcard_action = ['Solo_3', 'Solo_4', 'Solo_5', 'Solo_6', 'Solo_7', 'Solo_8', 'Solo_9', 'Solo_0', 'Solo_J',\n 'Solo_Q', 'Solo_K', 'Solo_A', 'Solo_2', 'Solo_w', 'Solo_W', 'SoloC_34567', 'SoloC_45678',\n 'SoloC_56789', 'SoloC_67890', 'SoloC_7890J', 'SoloC_890JQ', 'SoloC_90JQK', 'SoloC_0JQKA',\n 'SoloC_345678', 'SoloC_456789', 'SoloC_567890', 'SoloC_67890J', 'SoloC_7890JQ',\n 'SoloC_890JQK', 'SoloC_90JQKA', 'SoloC_3456789', 'SoloC_4567890', 'SoloC_567890J',\n 'SoloC_67890JQ', 'SoloC_7890JQK', 'SoloC_890JQKA', 'SoloC_34567890', 'SoloC_4567890J',\n 'SoloC_567890JQ', 'SoloC_67890JQK', 'SoloC_7890JQKA', 'SoloC_34567890J', 'SoloC_4567890JQ',\n 'SoloC_567890JQK', 'SoloC_67890JQKA', 'SoloC_34567890JQ', 'SoloC_4567890JQK',\n 'SoloC_567890JQKA', 'SoloC_34567890JQK', 'SoloC_4567890JQKA', 'SoloC_34567890JQKA',\n 'Pair_33', 'Pair_44', 'Pair_55', 'Pair_66', 'Pair_77', 'Pair_88', 'Pair_99', 'Pair_00', 'Pair_JJ',\n 'Pair_QQ', 'Pair_KK', 'Pair_AA', 'Pair_22', 'PairC_334455', 'PairC_445566', 'PairC_556677',\n 'PairC_667788', 'PairC_778899', 'PairC_889900', 'PairC_9900JJ', 'PairC_00JJQQ',\n 'PairC_JJQQKK', 'PairC_QQKKAA', 'PairC_33445566', 'PairC_44556677', 'PairC_55667788',\n 'PairC_66778899', 'PairC_77889900', 'PairC_889900JJ', 'PairC_9900JJQQ', 'PairC_00JJQQKK',\n 'PairC_JJQQKKAA', 'PairC_3344556677', 'PairC_4455667788', 'PairC_5566778899', 'PairC_6677889900',\n 'PairC_77889900JJ', 'PairC_889900JJQQ', 'PairC_9900JJQQKK', 'PairC_00JJQQKKAA',\n 'PairC_334455667788', 'PairC_445566778899', 'PairC_556677889900', 'PairC_6677889900JJ',\n 'PairC_77889900JJQQ', 'PairC_889900JJQQKK', 'PairC_9900JJQQKKAA', 'PairC_33445566778899',\n 'PairC_44556677889900', 'PairC_556677889900JJ', 'PairC_6677889900JJQQ',\n 'PairC_77889900JJQQKK', 'PairC_889900JJQQKKAA', 'PairC_3344556677889900', 'PairC_44556677889900JJ',\n 'PairC_556677889900JJQQ', 'PairC_6677889900JJQQKK', 'PairC_77889900JJQQKKAA',\n 'PairC_3344556677889900JJ', 'PairC_44556677889900JJQQ', 'PairC_556677889900JJQQKK',\n 'PairC_6677889900JJQQKKAA', 'PairC_3344556677889900JJQQ', 'PairC_44556677889900JJQQKK',\n 'PairC_556677889900JJQQKKAA', 'Trio_333', 'Trio_444', 'Trio_555', 'Trio_666', 'Trio_777',\n 'Trio_888', 'Trio_999', 'Trio_000', 'Trio_JJJ', 'Trio_QQQ', 'Trio_KKK', 'Trio_AAA', 'Trio_222',\n 'TrioC_333444', 'TrioC_444555', 'TrioC_555666', 'TrioC_666777', 'TrioC_777888',\n 'TrioC_888999', 'TrioC_999000', 'TrioC_000JJJ', 'TrioC_JJJQQQ', 'TrioC_QQQKKK', 'TrioC_KKKAAA',\n 'TrioC_333444555', 'TrioC_444555666', 'TrioC_555666777', 'TrioC_666777888',\n 'TrioC_777888999', 'TrioC_888999000', 'TrioC_999000JJJ', 'TrioC_000JJJQQQ', 'TrioC_JJJQQQKKK',\n 'TrioC_QQQKKKAAA', 'TrioC_333444555666', 'TrioC_444555666777', 'TrioC_555666777888',\n 'TrioC_666777888999', 'TrioC_777888999000', 'TrioC_888999000JJJ', 
'TrioC_999000JJJQQQ',\n 'TrioC_000JJJQQQKKK', 'TrioC_JJJQQQKKKAAA', 'TrioC_333444555666777', 'TrioC_444555666777888',\n 'TrioC_555666777888999', 'TrioC_666777888999000', 'TrioC_777888999000JJJ', 'TrioC_888999000JJJQQQ',\n 'TrioC_999000JJJQQQKKK', 'TrioC_000JJJQQQKKKAAA', 'TrioC_333444555666777888',\n 'TrioC_444555666777888999', 'TrioC_555666777888999000', 'TrioC_666777888999000JJJ', 'TrioC_777888999000JJJQQQ',\n 'TrioC_888999000JJJQQQKKK', 'TrioC_999000JJJQQQKKKAAA', 'TrioK_333',\n 'TrioK_444', 'TrioK_555', 'TrioK_666', 'TrioK_777', 'TrioK_888', 'TrioK_999', 'TrioK_000', 'TrioK_JJJ',\n 'TrioK_QQQ', 'TrioK_KKK', 'TrioK_AAA', 'TrioK_222', 'TrioCK_333444',\n 'TrioCK_444555', 'TrioCK_555666', 'TrioCK_666777', 'TrioCK_777888', 'TrioCK_888999', 'TrioCK_999000',\n 'TrioCK_000JJJ', 'TrioCK_JJJQQQ', 'TrioCK_QQQKKK', 'TrioCK_KKKAAA',\n 'TrioCK_333444555', 'TrioCK_444555666', 'TrioCK_555666777', 'TrioCK_666777888', 'TrioCK_777888999',\n 'TrioCK_888999000', 'TrioCK_999000JJJ', 'TrioCK_000JJJQQQ', 'TrioCK_JJJQQQKKK',\n 'TrioCK_QQQKKKAAA', 'TrioCK_333444555666', 'TrioCK_444555666777', 'TrioCK_555666777888',\n 'TrioCK_666777888999', 'TrioCK_777888999000', 'TrioCK_888999000JJJ', 'TrioCK_999000JJJQQQ',\n 'TrioCK_000JJJQQQKKK', 'TrioCK_JJJQQQKKKAAA', 'TrioCK_333444555666777', 'TrioCK_444555666777888',\n 'TrioCK_555666777888999', 'TrioCK_666777888999000', 'TrioCK_777888999000JJJ',\n 'TrioCK_888999000JJJQQQ', 'TrioCK_999000JJJQQQKKK', 'TrioCK_000JJJQQQKKKAAA', 'TrioPK_333',\n 'TrioPK_444', 'TrioPK_555', 'TrioPK_666', 'TrioPK_777', 'TrioPK_888', 'TrioPK_999',\n 'TrioPK_000', 'TrioPK_JJJ', 'TrioPK_QQQ', 'TrioPK_KKK', 'TrioPK_AAA', 'TrioPK_222', 'TrioCPK_333444',\n 'TrioCPK_444555', 'TrioCPK_555666', 'TrioCPK_666777', 'TrioCPK_777888',\n 'TrioCPK_888999', 'TrioCPK_999000', 'TrioCPK_000JJJ', 'TrioCPK_JJJQQQ', 'TrioCPK_QQQKKK',\n 'TrioCPK_KKKAAA', 'TrioCPK_333444555', 'TrioCPK_444555666', 'TrioCPK_555666777',\n 'TrioCPK_666777888', 'TrioCPK_777888999', 'TrioCPK_888999000', 'TrioCPK_999000JJJ',\n 'TrioCPK_000JJJQQQ', 'TrioCPK_JJJQQQKKK', 'TrioCPK_QQQKKKAAA', 'TrioCPK_333444555666',\n 'TrioCPK_444555666777', 'TrioCPK_555666777888', 'TrioCPK_666777888999', 'TrioCPK_777888999000',\n 'TrioCPK_888999000JJJ', 'TrioCPK_999000JJJQQQ', 'TrioCPK_000JJJQQQKKK',\n 'TrioCPK_JJJQQQKKKAAA', 'BombK_3333', 'BombK_4444', 'BombK_5555', 'BombK_6666',\n 'BombK_7777', 'BombK_8888', 'BombK_9999', 'BombK_0000', 'BombK_JJJJ', 'BombK_QQQQ', 'BombK_KKKK',\n 'BombK_AAAA', 'BombK_2222', 'BombPK_3333', 'BombPK_4444', 'BombPK_5555', 'BombPK_6666',\n 'BombPK_7777', 'BombPK_8888', 'BombPK_9999', 'BombPK_0000', 'BombPK_JJJJ', 'BombPK_QQQQ',\n 'BombPK_KKKK', 'BombPK_AAAA', 'BombPK_2222', 'Bomb_3333', 'Bomb_4444', 'Bomb_5555',\n 'Bomb_6666', 'Bomb_7777', 'Bomb_8888', 'Bomb_9999', 'Bomb_0000', 'Bomb_JJJJ', 'Bomb_QQQQ',\n 'Bomb_KKKK', 'Bomb_AAAA', 'Bomb_2222', 'Rocket_wW', 'PASS']\n\n # 主牌动作历史记录动作列表\n self.record_mcard_action = [\"None\", \"START\", \"SEP\"]\n self.record_mcard_action.extend(self.mcard_action)\n\n # 主牌动作字符串转数字字典\n # 使用有限字典限定顺序\n self.mcard_action_to_num_dict = OrderedDict(((k, v) for v, k in enumerate(self.mcard_action)))\n # 主牌动作数字转字符串字典\n self.mcard_action_to_str_dict = OrderedDict(((k, v) for k, v in enumerate(self.mcard_action)))\n\n # 包含START SEP None 标签的主牌动作记录字典\n self.record_mcard_action_to_num_dict = OrderedDict(((k, v) for v, k in enumerate(self.record_mcard_action)))\n self.record_mcard_action_to_str_dict = OrderedDict(((k, v) for k, v in enumerate(self.record_mcard_action)))\n\n # 带牌类型字典\n record_ktype_action = [\"None\", 
\"START\", \"SEP\", \"Solo\", \"Pair\"]\n self.record_ktype_action_to_num_dict = OrderedDict(((k, v) for v, k in enumerate(record_ktype_action)))\n self.record_ktype_action_to_str_dict = OrderedDict(((k, v) for k, v in enumerate(record_ktype_action)))\n\n # 带牌长度字典\n record_knum_action = [\"None\", \"START\", \"SEP\", \"1\", \"2\", \"3\", \"4\", \"5\"]\n self.record_knum_action_to_num_dict = OrderedDict(((k, v) for v, k in enumerate(record_knum_action)))\n self.record_knum_action_to_str_dict = OrderedDict(((k, v) for k, v in enumerate(record_knum_action)))\n\n def padding_zero(self, record):\n \"\"\"\n 填充历史记录序列到指定长度\n :param record: 历史记录序列\n :return:\n \"\"\"\n assert isinstance(record, list)\n zero_list = [0 for _ in range(self.seq_len - len(record))]\n record.extend(zero_list)\n return record\n\n def sort_card(self, cards):\n \"\"\"\n 对牌进行排序\n :param cards: 待排序的牌\n :return:返回排序后的牌\n \"\"\"\n assert isinstance(cards, str)\n if cards == 'PASS':\n return cards\n card_temp = list(cards)\n card_temp.sort(key=lambda x: self.card_index.index(x))\n return ''.join(card_temp)\n\n def sub(self, cards1, cards2):\n \"\"\"\n 计算两组牌之间的差\n :param cards1: 第一组牌\n :param cards2: 第二组牌\n :return: 两组牌的差集\n \"\"\"\n assert isinstance(cards1, str)\n assert isinstance(cards2, str)\n\n d = Differ()\n\n # 断言一组牌必定包含在另外一组牌当中\n assert len(set([string[0] for string in d.compare(cards1, cards2) if string[0] in [\"-\", \"+\"]])) <= 1\n\n if len(cards1) >= len(cards2):\n remain = [j[-1] for j in filter(lambda s: s[0] == '-', d.compare(cards1, cards2))]\n else:\n remain = [j[-1] for j in filter(lambda s: s[0] == '+', d.compare(cards1, cards2))]\n return self.sort_card(\"\".join(remain))\n\n def check_total_cards(self, output_cards, hands, down_hands, up_hands):\n \"\"\"\n 检查总牌数是否相等\n :param output_cards:已经打算的牌\n :param hands:当前手牌\n :param down_hands:下家手牌\n :param up_hands:上家手牌\n :return:bool\n \"\"\"\n assert isinstance(output_cards, str)\n assert isinstance(hands, str)\n assert isinstance(down_hands, str)\n assert isinstance(up_hands, str)\n\n # 如果之前没有出过牌\n if output_cards != \"None\":\n total_cards = self.sort_card(output_cards + hands + down_hands + up_hands)\n else:\n total_cards = self.sort_card(hands + down_hands + up_hands)\n\n return total_cards == self.all_card\n\n def check_up_down_hands(self, hidden_cards, down_hands, up_hands):\n \"\"\"\n 检查上家牌和下家牌是否等于当前未知牌\n :param hidden_cards: 未知牌\n :param down_hands: 下家牌\n :param up_hands: 上家牌\n :return: bool\n \"\"\"\n assert isinstance(hidden_cards, str)\n assert isinstance(down_hands, str)\n assert isinstance(up_hands, str)\n\n up_down_total_hands = self.sort_card(down_hands + up_hands)\n\n return hidden_cards == up_down_total_hands\n\n def ktype_action_transform(self, action):\n \"\"\"\n 如果带牌类型是字符串则转换为数字,如果带牌类型是数字则转换为字符串\n :param action: 动作\n :return:\n \"\"\"\n if isinstance(action, str):\n return self.record_ktype_action_to_num_dict[action]\n else:\n return self.record_ktype_action_to_str_dict[action]\n\n def klen_action_transform(self, action):\n \"\"\"\n 如果带牌长度是字符串则转换为数字\n 如果带牌长度是数字则转换为字符串\n :param action: 动作\n :return:\n \"\"\"\n if isinstance(action, str):\n return self.record_knum_action_to_num_dict[action]\n else:\n return self.record_knum_action_to_str_dict[action]\n\n def mcard_action_transform(self, action):\n \"\"\"\n 如果主牌动作是字符串,则返回数字\n 如果主牌动作是数据则返回字符串\n :param action:\n :return:\n \"\"\"\n if isinstance(action, str):\n return self.mcard_action_to_num_dict[action]\n else:\n return self.mcard_action_to_str_dict[action]\n\n def 
record_mcard_action_transform(self, action):\n \"\"\"\n 这个里面有312种动作\n 如果主牌动作是字符串,则返回数字\n 如果主牌动作是数据则返回字符串\n :param action:\n :return:\n \"\"\"\n if isinstance(action, str):\n return self.record_mcard_action_to_num_dict[action]\n else:\n return self.record_mcard_action_to_str_dict[action]\n\n def read_csv(self, file_path):\n \"\"\"\n 读取CSV文件\n :param file_path: 文件路径\n :return: pd.DataFrame\n \"\"\"\n assert isinstance(file_path, str)\n\n logging.debug(\"正在读取CSV数据\")\n read_data = pd.read_csv(filepath_or_buffer=file_path, encoding=\"utf-8\", header=0)\n read_data[\"game_session\"] = np.cumsum(read_data.record == \"None\")\n\n logging.debug(\"读取CSV数据完成\")\n\n return read_data\n\n def get_one_sessoin_data(self, data, session_id):\n \"\"\"\n 获取一场比赛的游戏数据\n :param data: 全部游戏数据\n :param session_id:场次索引\n :return: one_session_pid_data 玩家角色数据\n one_session_mcard_data 玩家主牌数据\n one_session_kcard_data 玩家带牌数据\n one_session_hidden_data 未知牌数据\n one_session_hands_data 玩家手牌数据\n one_session_output_data 已经打出的牌数据\n one_session_record_data 之前打牌的历史记录\n \"\"\"\n\n assert isinstance(data, pd.DataFrame)\n\n if session_id % self.print_freq == 0:\n string = \"正在获取第{session_id}场数据\".format(session_id=session_id)\n logging.debug(string)\n\n # 单场游戏的数据\n one_session_data = data[data.game_session == session_id]\n\n # 玩家编号\n # 0 表示庄家\n # 1 表示下家\n # 2 表示上家\n one_session_pid_data = list(one_session_data.pid)\n\n # 因为有些地方pid不规范,需要做一下调整\n if one_session_pid_data[0] == 1:\n one_session_pid_data = [num - 1 for num in one_session_pid_data]\n\n # 获取主牌数据\n one_session_mcard_data = list(one_session_data.mcard)\n # 获取带牌数据\n one_session_kcard_data = list(one_session_data.kcard)\n # 获取手牌数据\n one_session_hands_data = list(one_session_data.hands)\n # 获取未知牌数据\n one_session_hidden_data = list(one_session_data.hidden)\n # 获取已经打出牌的数据\n one_session_output_data = list(one_session_data.output)\n\n if session_id % self.print_freq == 0:\n string = \"第{session_id}场数据读取完成\".format(session_id=session_id)\n logging.debug(string)\n\n result = {\n \"one_session_pid_data\": one_session_pid_data,\n \"one_session_mcard_data\": one_session_mcard_data,\n \"one_session_kcard_data\": one_session_kcard_data,\n \"one_session_hidden_data\": one_session_hidden_data,\n \"one_session_hands_data\": one_session_hands_data,\n \"one_session_output_data\": one_session_output_data\n }\n\n return result\n\n def kcard_message_transform(self, kcard_message):\n \"\"\"\n 对于带牌信息的处理\n :param kcard_message: string 表示带牌的信息\n :return: 返回带牌的信息\n \"\"\"\n assert isinstance(kcard_message, str)\n if kcard_message == \"None\": # 没有从牌的情况处理\n return [\"None\", \"None\"]\n else:\n # 有从牌的情况\n # 因为不同的牌(单牌或对牌)之间使用\";\"间隔\n # 可以使用这个提取信息\n kcard_message_split = kcard_message.split(sep=\";\")\n\n # 不同牌的种类数量\n kcard_num = len(kcard_message_split)\n\n min_num = min([len(list(_)) for _ in kcard_message_split])\n max_num = max([len(list(_)) for _ in kcard_message_split])\n\n # 如果不成立,则说明数据存在问题\n assert min_num == max_num\n\n if min_num == 1:\n return [\"Solo\", kcard_num]\n else:\n return [\"Pair\", kcard_num]\n\n def one_session_data_processing_to_csv(self, session_id, data):\n \"\"\"\n 将一场比赛数据进行处理以适应CSV形式保存\n :param session_id:游戏场次号\n :param data:\n :return:\n \"\"\"\n \"\"\"\n 读取一场游戏数据\n \"\"\"\n\n one_session_data = self.get_one_sessoin_data(data=data, session_id=session_id)\n one_session_pid_data = one_session_data[\"one_session_pid_data\"]\n one_session_mcard_data = one_session_data[\"one_session_mcard_data\"]\n one_session_kcard_data = one_session_data[\"one_session_kcard_data\"]\n 
one_session_hidden_data = one_session_data[\"one_session_hidden_data\"]\n one_session_hands_data = one_session_data[\"one_session_hands_data\"]\n one_session_output_data = one_session_data[\"one_session_output_data\"]\n\n # 打印一下当前正在处理的场次\n if session_id % self.print_freq == 0:\n string = \"正在处理第{session_id}场数据\".format(session_id=session_id)\n logging.debug(string)\n\n # 历史记录\n # START 起始标志\n mcard_record = [\"START\"] # 主牌的历史记录\n # 第一个位置表示牌型\n # 第二个位置表示牌数量\n ktype_record = [\"START\"] # 带牌的历史记录\n klen_record = [\"START\"]\n\n one_session_game_record = []\n\n for game_step in range(len(one_session_pid_data)):\n\n \"\"\"\n 记录前两个回合的打牌数据,用于推断当前打牌类型\n \"\"\"\n if game_step >= 1:\n tmp_mcard_two_time_before_record.append(one_session_mcard_data[game_step - 1])\n del tmp_mcard_two_time_before_record[0]\n\n else:\n tmp_mcard_two_time_before_record = [\"None\", \"None\"]\n mcard_two_time_before_record = \";\".join(tmp_mcard_two_time_before_record)\n\n \"\"\"\n 记录上家和下家的手牌信息,用于做数据预测\n \"\"\"\n if game_step < len(one_session_pid_data) - 2:\n down_hands = one_session_hands_data[game_step + 1]\n up_hands = one_session_hands_data[game_step + 2]\n # 倒数第二场\n elif game_step == len(one_session_pid_data) - 2:\n down_hands = one_session_hands_data[game_step + 1]\n up_hands = self.sub(cards1=one_session_hidden_data[game_step], cards2=down_hands)\n tmp_up_hands = up_hands\n # 最后一场\n else:\n down_hands = tmp_up_hands\n up_hands = self.sub(cards1=one_session_hidden_data[game_step], cards2=down_hands)\n\n \"\"\"\n 处理带牌信息\n \"\"\"\n kcard_message = one_session_kcard_data[game_step]\n kcard_type, kcard_num = self.kcard_message_transform(kcard_message)\n\n \"\"\"\n 将处理完的数据打包,并保存成one_session_game_record中\n \"\"\"\n one_session_game_record.append([session_id, # 场次id\n one_session_pid_data[game_step], # 玩家角色id\n mcard_record.copy(), # 主牌历史记录\n ktype_record.copy(), # 带牌类型历史记录\n klen_record.copy(), # ���牌张数历史记录\n mcard_two_time_before_record, # 之前两次的出牌记录\n one_session_output_data[game_step], # 已经打出牌的记录\n one_session_hidden_data[game_step], # 未知牌的记录\n one_session_hands_data[game_step], # 玩家手牌记录\n down_hands, # 下家手牌记录\n up_hands, # 上家手牌记录\n len(one_session_output_data[game_step]) if one_session_output_data[game_step] != \"None\" else 0, # 已经出牌的数量\n len(one_session_hidden_data[game_step]), # 未知牌的数量\n len(one_session_hands_data[game_step]), # 玩家手牌数量记录\n len(down_hands), # 下家手牌数量记录\n len(up_hands), # 上家手牌数量记录\n one_session_mcard_data[game_step], # 玩家打牌的主牌记录\n kcard_type, # 玩家出牌的带牌类型记录\n kcard_num]) # 玩家带牌数量记录\n\n \"\"\"\n 更新下一回合数据\n \"\"\"\n mcard_record.append(one_session_mcard_data[game_step])\n\n ktype_record.append(kcard_type)\n klen_record.append(str(kcard_num))\n\n \"\"\"\n 插入SEP 每一轮插入一个\"SEP\" 表示经历过一轮\n \"\"\"\n if (game_step + 1) % 3 == 0:\n mcard_record.append(\"SEP\")\n ktype_record.append(\"SEP\")\n klen_record.append(\"SEP\")\n\n if session_id % self.print_freq == 0:\n string = \"第{session_id}场数据处理完成\".format(session_id=session_id)\n logging.debug(string)\n\n return one_session_game_record\n\n def get_cards_all_action(self, cards):\n \"\"\"\n 当前牌组的所有可出动作\n :param cards: 牌组\n :return:\n \"\"\"\n\n if cards == \"None\":\n return [\"PASS\"]\n else:\n cards_all_action = set()\n counter = Counter(cards)\n for i in range(3, len(self.mcard_action_to_num_dict)):\n mcard_action = self.mcard_action_transform(action=i)\n if mcard_action == \"PASS\":\n cards_all_action.add(\"PASS\")\n else:\n mcard_type, mcards = mcard_action.split(\"_\")\n if self.mcard_in_hands(mcards=mcards, hands=cards):\n if mcard_type == \"TrioK\":\n # 
对于三带一,要求总牌数必须超过四张,且有两种以上牌型\n if len(cards) >= 4 and len(counter) >= 2:\n cards_all_action.add(mcard_action)\n elif mcard_type == \"TrioPK\":\n # 对于三带一对, 要求总牌数必须5张以上,且有两种牌数量大于2\n if len(cards) >= 5 and self.get_above_n_num(cards, 2) >= 2:\n cards_all_action.add(mcard_action)\n elif mcard_type == \"TrioCK\":\n # 对于带单的飞机,每三个加一张单\n min_length = len(Counter(mcards))\n if len(cards) >= min_length * 4:\n cards_all_action.add(mcard_action)\n elif mcard_type == \"TrioCPK\":\n # 对于飞机带对牌\n min_length = len(Counter(mcards))\n if len(cards) > min_length * 5 and self.get_above_n_num(cards=cards, n=2) >= min_length * 2:\n cards_all_action.add(mcard_action)\n elif mcard_type == \"BombK\":\n if len(cards) >= 6:\n cards_all_action.add(mcard_action)\n elif mcard_type == \"BombPK\":\n if len(cards) >= 8 and (self.get_above_n_num(cards=cards, n=2) >= 3 or self.get_above_n_num(cards=cards, n=4) >= 2):\n cards_all_action.add(mcard_action)\n else:\n cards_all_action.add(mcard_action)\n return cards_all_action\n\n def mcard_action2vec(self, mcard):\n \"\"\"\n 将主牌动作转换为309维向量\n :param mcard:主牌动作\n :return:\n \"\"\"\n mcard_index = self.mcard_action_transform(action=mcard)\n mcard_action_vec = [0 for _ in range(self.num_mcard_action)]\n mcard_action_vec[mcard_index] = 1\n return np.array(mcard_action_vec)\n\n def cards_all_action2vec(self, cards_all_action):\n \"\"\"\n 根据当前牌求出所有可执行动作,并将这个动作用向量表示,共309维\n :param cards_all_action:\n :return:\n \"\"\"\n cards_all_action_vec = [0 for _ in range(self.num_mcard_action)]\n for mcard_action in cards_all_action:\n cards_all_action_vec += self.mcard_action2vec(mcard=mcard_action)\n return cards_all_action_vec\n\n def mcard_in_hands(self, mcards, hands):\n \"\"\"\n 判断当前主牌动作在当前手牌下是不是可出\n :param mcards:主牌动作\n :param hands:手牌\n :return:\n \"\"\"\n d = Differ()\n mcards = self.sort_card(cards=mcards)\n hands = self.sort_card(cards=hands)\n\n for s in d.compare(a=hands, b=mcards):\n if s[0] == \"+\":\n return False\n return True\n\n def get_above_n_cards(self, cards, n):\n \"\"\"\n 统计手牌中张数多于n的牌\n :param cards: 手牌\n :param n: 数量\n :return:\n \"\"\"\n counter = Counter(cards)\n return [card for card, num in counter.items() if num >= n]\n\n def get_above_n_num(self, cards, n):\n \"\"\"\n 统计手牌中数量多于n的手牌的数量\n :param cards: 手牌\n :param n: 数量\n :return:\n \"\"\"\n return len(self.get_above_n_cards(cards=cards, n=n))\n\n def card2vec(self, cards):\n \"\"\"\n 将卡牌数据转换成向量\n 0个A表示成[1,0,0,0,0]\n 1个A用于成[0,1,0,0,0]\n 将不同类型手牌拼接,最终为69维向量\n :param cards:手牌\n :return:\n \"\"\"\n hands_counter = Counter(cards)\n result = []\n for card_type in self.card_index:\n if card_type not in \"wW\":\n tmp = [0, 0, 0, 0, 0]\n if card_type in hands_counter.keys():\n tmp[hands_counter[card_type]] = 1\n else:\n tmp[0] = 1\n else:\n tmp = [0, 0]\n if card_type in hands_counter.keys():\n tmp[hands_counter[card_type]] = 1\n else:\n tmp[0] = 1\n result.extend(tmp)\n return np.array(result)\n\n def one_session_data_processing_to_pickle(self, session_id, data):\n \"\"\"\n 对一场比赛数据进行处理,处理之后数据用于保存成pickle文件\n :param session_id:场次编号\n :param data:数据\n :return:\n \"\"\"\n one_session_data = self.get_one_sessoin_data(data=data, session_id=session_id)\n one_session_pid_data = one_session_data[\"one_session_pid_data\"]\n one_session_mcard_data = one_session_data[\"one_session_mcard_data\"]\n one_session_kcard_data = one_session_data[\"one_session_kcard_data\"]\n one_session_hidden_data = one_session_data[\"one_session_hidden_data\"]\n one_session_hands_data = one_session_data[\"one_session_hands_data\"]\n one_session_output_data = 
one_session_data[\"one_session_output_data\"]\n\n if session_id % self.print_freq == 0:\n string = \"正在处理第{session_id}场数据\".format(session_id=session_id)\n logging.debug(string)\n\n # 历史记录\n # START 起始标志\n mcard_record = [self.record_mcard_action_transform(action=\"START\")] # 主牌的历史记录\n # 第一个位置表示牌型\n # 第二个位置表示牌数量\n ktype_record = [self.ktype_action_transform(action=\"START\")] # 带牌的历史记录\n klen_record = [self.klen_action_transform(action=\"START\")]\n\n one_session_game_record = []\n\n for game_step in range(len(one_session_pid_data)):\n\n # 之前两场的游戏记录\n if game_step >= 1:\n mcard_two_time_before_record.append(self.record_mcard_action_transform(one_session_mcard_data[game_step - 1]))\n del mcard_two_time_before_record[0]\n\n else:\n mcard_two_time_before_record = [self.record_mcard_action_transform(\"None\"), self.record_mcard_action_transform(\"None\")]\n\n \"\"\"\n 计算每个玩家手牌信息\n \"\"\"\n # 对于从第一回合到到数第二回合的数据处理\n if game_step < len(one_session_pid_data) - 2:\n one_session_down_hands = one_session_hands_data[game_step + 1]\n one_session_up_hands = one_session_hands_data[game_step + 2]\n # 倒数第二场\n elif game_step == len(one_session_pid_data) - 2:\n one_session_down_hands = one_session_hands_data[game_step + 1]\n one_session_up_hands = self.sub(cards1=one_session_hidden_data[game_step], cards2=one_session_down_hands)\n tmp_one_session_up_hands = one_session_up_hands\n # 最后一场\n else:\n one_session_down_hands = tmp_one_session_up_hands\n one_session_up_hands = self.sub(cards1=one_session_hidden_data[game_step], cards2=one_session_down_hands)\n\n # 对带牌信息进行处理\n kcard_message = one_session_kcard_data[game_step]\n kcard_type, kcard_num = self.kcard_message_transform(kcard_message)\n\n # 将求出未知牌所有动作并转换为向量\n hidden_all_action_vec = self.cards_all_action2vec(self.get_cards_all_action(one_session_hidden_data[game_step]))\n # 求出已当前玩家手牌可执行动作并转换为向量\n hands_all_action_vec = self.cards_all_action2vec(self.get_cards_all_action(one_session_hands_data[game_step]))\n\n game_record_dict = {\n \"session_id\": session_id,\n \"gamer_id\": one_session_pid_data[game_step],\n \"mcard_action_record\": self.padding_zero(mcard_record.copy()),\n \"ktype_action_record\": self.padding_zero(ktype_record.copy()),\n \"klen_action_record\": self.padding_zero(klen_record.copy()),\n \"mcard_record_2s_before\": mcard_two_time_before_record.copy(),\n \"output_cards_vec\": self.card2vec(one_session_output_data[game_step]),\n \"hidden_cards_vec\": self.card2vec(one_session_hidden_data[game_step]),\n \"hands_vec\": self.card2vec(one_session_hands_data[game_step]),\n \"down_hands_vec\": self.card2vec(one_session_down_hands),\n \"up_hands_vec\": self.card2vec(one_session_up_hands),\n \"hidden_cards_action_vec\": hidden_all_action_vec,\n \"inf_hidden_cards_action_vec\": [-np.inf if _ == 0 else 0 for _ in hidden_all_action_vec],\n \"hands_action_vec\": hands_all_action_vec,\n \"inf_hands_action_vec\": [-np.inf if _ == 0 else 0 for _ in hands_all_action_vec],\n \"num_output_cards\": len(one_session_output_data[game_step]) if one_session_output_data[game_step] != \"None\" else 0,\n \"num_hidden_cards\": len(one_session_hidden_data[game_step]),\n \"num_hands\": len(one_session_hands_data[game_step]),\n \"num_up_hands\": len(one_session_up_hands),\n \"num_down_hands\": len(one_session_down_hands),\n \"next_mcard_action_label\": self.mcard_action_transform(one_session_mcard_data[game_step])\n }\n\n one_session_game_record.append(game_record_dict)\n\n # 更新下一回合游戏数据\n 
mcard_record.append(self.record_mcard_action_transform(one_session_mcard_data[game_step]))\n\n ktype_record.append(self.ktype_action_transform(kcard_type))\n klen_record.append(self.klen_action_transform(str(kcard_num)))\n\n # 每一轮插入一个\"SEP\"\n # 表示经历过一轮\n if (game_step + 1) % 3 == 0:\n mcard_record.append(self.record_mcard_action_transform(\"SEP\"))\n ktype_record.append(self.ktype_action_transform(\"SEP\"))\n klen_record.append(self.klen_action_transform(\"SEP\"))\n\n if session_id % self.print_freq == 0:\n string = \"第{session_id}场数据处理完成\".format(session_id=session_id)\n logging.debug(string)\n\n return one_session_game_record\n\n def processing_data_to_csv(self, data):\n \"\"\"\n 对数据进行预处理,返回处理结果\n :param data: 原始数据\n :return: 处理后数据\n \"\"\"\n num_session = max(data.game_session)\n\n result = []\n\n for session_id in range(1, num_session + 1):\n one_session_game_record = self.one_session_data_processing_to_csv(session_id=session_id, data=data)\n result.extend(one_session_game_record)\n\n # result = []\n #\n # num_session = len(tmp_game_record)\n #\n # for session_id in range(num_session):\n # result.extend(tmp_game_record[session_id])\n\n result = [[session_id, # 游戏场次记录\n gamer_id, # 玩家角色id记录\n \";\".join(mcard_record), # 历史主牌记录\n \";\".join(ktype_record), # 历史带牌类型记录\n \";\".join(klen_record), # 历史带牌数量记录\n mcard_two_time_before_record,\n output_cards, # 已经打出的牌的记录\n hidden_cards, # 未知牌记录\n hands, # 玩家手牌记录\n down_hands, # 玩家手牌记录\n up_hands, # 上家手牌记录\n num_output_cards,\n num_hidden_cards,\n num_hands,\n num_down_hands,\n num_up_hands,\n mcard_play, # 玩家打出的主牌\n ktype_play, # 玩家的带牌类型\n klen_play,\n self.check_total_cards(output_cards=output_cards,\n hands=hands,\n down_hands=down_hands,\n up_hands=up_hands) and\n self.check_up_down_hands(hidden_cards=hidden_cards,\n down_hands=down_hands,\n up_hands=up_hands)] for # 玩家的带牌数量\n session_id,\n gamer_id,\n mcard_record,\n ktype_record,\n klen_record,\n mcard_two_time_before_record,\n output_cards,\n hidden_cards,\n hands,\n down_hands,\n up_hands,\n num_output_cards,\n num_hidden_cards,\n num_hands,\n num_down_hands,\n num_up_hands,\n mcard_play,\n ktype_play,\n klen_play in result]\n\n result = pd.DataFrame(result)\n\n result.columns = [\"session_id\",\n \"gamer_id\",\n \"mcard_record\",\n \"ktype_record\",\n \"klen_record\",\n \"mcard_two_time_before_record\",\n \"output_cards\",\n \"hidden_cards\",\n \"hands\",\n \"down_hands\",\n \"up_hands\",\n \"num_output_cards\",\n \"num_hidden_cards\",\n \"num_hands\",\n \"num_down_hands\",\n \"num_up_hands\",\n \"mcard_play\",\n \"ktype_play\",\n \"klen_play\",\n \"is_qualified\"]\n\n return result\n\n # def processing_data_to_pickle(self, data):\n # \"\"\"\n # 将所有数据进行预处理并保存成pickle文件\n # :param data:\n # :return:\n # \"\"\"\n # num_session = max(data.game_session)\n #\n # print(num_session)\n #\n # for session_id in range(1, num_session + 1):\n # one_session_game_record = self.one_session_data_processing_to_pickle(session_id=session_id, data=data)\n # result.extend(one_session_game_record)\n #\n # return result\n #\n # def save_to_pickle(self, data, save_path, num_record_of_one_pickle):\n # # 统计记录的条数\n # num_record = len(data)\n # # 数据分割索引\n # data_seq = [i for i in np.arange(start=0, stop=num_record, step=num_record_of_one_pickle)]\n #\n # # 设置保存文件路径\n #\n # if not os.path.exists(save_path):\n # os.mkdir(save_path)\n #\n # [os.remove(os.path.join(save_path, file_name)) for file_name in os.listdir(save_path)]\n #\n # # 按之前索引分割文件\n # for i in range(len(data_seq)):\n # if i < len(data_seq) - 1:\n # 
logging.debug(\"正在保存第{start}到{end}条数据\".format(start=data_seq[i], end=data_seq[i + 1]))\n # result_one_slice = data[data_seq[i]:data_seq[i + 1]]\n # else:\n # logging.debug(\"正在保存第{start}到{end}条数据\".format(start=data_seq[i], end=len(data)))\n # result_one_slice = data[data_seq[i]:]\n #\n # save_file_name = \"\".join([save_path, \"/\", save_path, \"_\", str(i), \".pickle\"])\n #\n # # 保存数据\n # with open(file=save_file_name, mode=\"wb\") as f:\n # pickle.dump(result_one_slice, f)\n # logging.debug(\"保存全部pickle数据完成\")\n\n def processing_and_save_to_pickle(self, save_file_name, data, session_batch=5000):\n num_session = max(data.game_session)\n file_index = 0\n num_index = 0\n result = []\n for session_id in range(30001, num_session + 1):\n one_session_game_record = self.one_session_data_processing_to_pickle(session_id=session_id, data=data)\n result.extend(one_session_game_record)\n num_index += 1\n\n if num_index == session_batch:\n num_index = 0\n with open(file=save_file_name + \"_\" + str(file_index) + \".pickle\", mode=\"wb\") as f:\n pickle.dump(result, f)\n result = []\n file_index += 1\n logging.debug(\"第\" + str(file_index) + \"份数据保存完毕\")\n\n with open(file=save_file_name + \"_\" + str(file_index) + \".pickle\", mode=\"wb\") as f:\n pickle.dump(result, f)\n\n def save_to_csv(self, data, save_path):\n \"\"\"\n 将数据保存成CSV格式\n :param data: 数据\n :param save_path: 保存路径\n :return: None\n \"\"\"\n assert isinstance(data, pd.DataFrame)\n\n logging.debug(\"正在保存CSV数据\")\n\n data.to_csv(path_or_buf=save_path, index=None)\n\n logging.debug(\"保存CSV数据完成\")\n\n\nif __name__ == \"__main__\":\n read_file_path = \"sample10w.csv\"\n csv_save_path = \"sample10w_processing.csv\"\n process2rnn = Processing2RNN()\n sample10w = process2rnn.read_csv(file_path=read_file_path)\n # process2rnn.one_session_data_processing_to_pickle(data=sample, session_id=1)\n\n # processing_data_pickle = process2rnn.processing_data_to_pickle(data=sample10w)\n #\n # process2rnn.save_to_pickle(data=processing_data_pickle, save_path=\"sample10w\", num_record_of_one_pickle=5000)\n\n process2rnn.processing_and_save_to_pickle(save_file_name=\"sample100w/sample100w\", data=sample10w, session_batch=5000)\n","sub_path":"data/processing2rnn.py","file_name":"processing2rnn.py","file_ext":"py","file_size_in_byte":42218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"162880753","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass SaleOrderWarehouse(models.Model):\n _inherit = \"sale.order\"\n\n warehouse_id = fields.Many2one('stock.warehouse', string='Warehouse',\n domain=\"[('company_id', '=', company_id)]\")\n\n name = fields.Char(string='Order Reference', required=True, copy=False,\n readonly=True, states={'draft': [('readonly', False)]},\n index=True, default=lambda self: _('New'))\n\n @api.model\n def create(self, vals):\n wh = self.env['stock.warehouse'].browse(vals.get('warehouse_id'))\n if vals.get('name', _('New')) == _('New'):\n if wh.code in ('HCALL', 'H52', 'H33', 'HD52', 'HDBQ', 'HSA', 'HSM', 'AKT'):\n vals['name'] = self.env['ir.sequence'].next_by_code(wh.code.lower()) or _('New')\n res = super(SaleOrderWarehouse, self).create(vals)\n return res\n\n @api.model\n def _prepare_invoice(self):\n\n res = super(SaleOrderWarehouse, self)._prepare_invoice()\n res['warehouse_id'] = self.warehouse_id.id\n\n return res\n\n @api.model\n def _create_invoices(self, grouped=False, final=False):\n res = super(SaleOrderWarehouse, self)._create_invoices()\n\n res['journal_id'] = res.journal_id.search(\n [('warehouse_id', '=', self.warehouse_id.name),\n ('type', '=', 'sale')], limit=1)\n\n return res\n","sub_path":"gec_accounting/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"310641133","text":"from django.http import JsonResponse\nfrom api import models\nfrom rest_framework import serializers\nfrom rest_framework.views import APIView\n# from api import serializers as api_serializers\nfrom api.serializers import srl\nfrom rest_framework.response import Response\nfrom django.shortcuts import HttpResponse\nfrom django.shortcuts import render\nimport json\nfrom api.utlis.response import BaseResponse\n\n# Create your views here.\n\n\nclass DegreeCourse(APIView):\n\n def get(self, request, version, key):\n print('key', key)\n # # print('request.content_type', request.content_type)\n # # print('request.GET', request.GET)\n # # print('request.parsers', request.parsers)\n # query = dict(request.GET)\n # print('request.GET', query)\n\n # # for k, v in request.GET:\n # # print(k, v)\n data = BaseResponse()\n data.data = {}\n if key == 'scholarship': # b.查看所有学位课并打印学位课名称以及学位课的奖学金\n degree_course = models.DegreeCourse.objects.all() # 查询所有学位课\n for x in degree_course:\n scholarship_obj = x.scholarship_set.all() # 一对多反向查询所有奖学金\n scholarship_obj = srl.ScholarshipSer(\n scholarship_obj, many=True).data # 序列化所有奖学金\n data.data[x.name] = scholarship_obj\n data.code = 1\n elif key == 'teacher':\n # a.查看所有学位课并打印学位课名称以及授课老师\n degree_course = models.DegreeCourse.objects.all() # 查询所有学位课\n for x in degree_course:\n all_teacher = x.teachers.all() # 多对对查询所有老师queryset\n all_teacher = srl.TeacherSer(\n all_teacher, many=True).data # 序列化所有老师queryset\n data.data[x.name] = all_teacher\n data.code = 1\n elif key.isdigit(): # d. 查看id=1的学位课对���的所有模块名称\n # print('key.isdigit()', key.isdigit())\n degree_course_obj = models.DegreeCourse.objects.get(id=key)\n all_course = degree_course_obj.course_set.all()\n all_course = srl.CourseSer(all_course, many=True).data\n data.data = all_course\n\n else:\n data.error = '缺少查询关键字!'\n\n # 查询所有DegreeCourse\n try:\n pass\n except Exception as e:\n data.code = -1\n data.error = '获取数据失败!'\n\n return HttpResponse(json.dumps(data.dict, ensure_ascii=False))\n # return HttpResponse(data.dict)\n # return Response(data.dict)\n","sub_path":"api/views/degreecourse.py","file_name":"degreecourse.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"453938811","text":"\"\"\"\nExample script to show how to use Multilink Element\n\"\"\"\n\n# Python Base Import\nfrom smc import session\nfrom smc.elements.netlink import StaticNetlink, MultilinkMember, Multilink\nfrom smc.elements.network import Network, Router\nfrom smc.vpn.elements import ConnectionType\nfrom smc_info import *\n\nif __name__ == '__main__':\n session.login(url=SMC_URL, api_key=API_KEY, verify=False, timeout=120, api_version=API_VERSION)\n print(\"session OK\")\n\ntry:\n # create multi link\n # get first connection type\n connection_type = list(ConnectionType.objects.all())[0]\n network1 = Network(\"net-10.1.16.0/24\")\n network2 = Network(\"net-172.31.16.0/24\")\n router1 = Router(\"Etisalat Dubai Router\")\n router2 = Router(\"Du Dubai Router\")\n snl1 = StaticNetlink.create(name=\"SNL_Premier-ISP\",\n provider_name=\"ISP1\",\n output_speed=40000,\n input_speed=40000,\n probe_address=[\"10.1.16.1\"],\n network=[network1],\n gateway=router1,\n connection_type=connection_type,\n )\n snl2 = StaticNetlink.create(name=\"SNL_Second-ISP\",\n provider_name=\"ISP2\",\n output_speed=50000,\n input_speed=50000,\n probe_address=[\"172.31.16.1\"],\n network=[network2],\n gateway=router2,\n connection_type=connection_type,\n )\n\n print('SNL1\\n', snl1.data.data)\n print('SNL2\\n', snl2.data.data)\n\n print('SNL1.network\\n', snl1.network)\n print('SNL2.network\\n', snl2.network)\n l_ml_member = list()\n l_ml_member.append(MultilinkMember.create(netlink=snl1, netlink_role='active',\n ip_range='10.1.16.1-10.1.16.254'))\n l_ml_member.append(MultilinkMember.create(netlink=snl2, netlink_role='standby',\n ip_range='172.31.16.1-172.31.16.254'))\n\n oml = Multilink.create(name=\"OML_TEST\",\n multilink_members=l_ml_member)\n print('oml={} members={}'.format(str(oml), oml.members))\nexcept Exception as e:\n print(e)\n exit(1)\nfinally:\n print(\"delete elements..\")\n try:\n oml = Multilink.get(name=\"OML_TEST\")\n oml.delete()\n except Exception as e:\n print(e)\n snl1 = StaticNetlink.get(name=\"SNL_Premier-ISP\")\n snl1.delete()\n snl2 = StaticNetlink.get(name=\"SNL_Second-ISP\")\n snl2.delete()\n session.logout()\n","sub_path":"smc/examples/netlink.py","file_name":"netlink.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"141746363","text":"import re\n\n\ndef tree(name, data, nest=1, parent=\"\", looped={}, value=\"\"):\n \"\"\"\n Internal function to properly format nested objects / lists of objects,\n and display them to the terminal (thanks, @[Dids](https://github.com/Dids)!)\n \"\"\"\n\n spacing = \"\"\n sp = \"\"\n\n i = looped.get('i', 1)\n l = looped.get('l', len(data))\n\n if nest == 1:\n spacing = \"─ \"\n\n if isinstance(data, dict):\n if nest == 1:\n value += f\"{spacing}{name}\\n\"\n\n for key in data:\n if not key:\n continue\n\n if len(looped):\n sp = re.sub(r'├', '│', re.sub(r'─', ' ', re.sub(r'└',\n ' ', parent))) + (\"├── \" if i < l else '└── ')\n else:\n sp = \" \" * len(parent if len(parent) else spacing) + \\\n (\"├── \" if i < l else '└── ')\n\n if len(key) and isinstance(data[key], dict):\n value += f\"{sp}{key}\\n\"\n value = tree(key, data[key], nest=nest+1,\n parent=sp, looped={'i': 1}, value=value)\n\n else:\n if i >= l:\n value += f\"{re.sub(r'├', '└', sp)}{key}: {data[key]}\\n\"\n else:\n value += f\"{sp}{key}: {data[key]}\\n\"\n\n i += 1\n\n elif isinstance(data, list):\n value += f\"{spacing}{name}\\n\"\n\n i = 1\n for d in data:\n if not d:\n continue\n\n value = tree(name, d, nest=nest+1, parent=spacing,\n looped={'i': i, 'l': len(data)}, value=value)\n i += 1\n\n return value\n","sub_path":"src/managers/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"166721499","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for scraper project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'scraper'\n\nSPIDER_MODULES = ['scraper.spiders']\nNEWSPIDER_MODULE = 'scraper.spiders'\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'scraper (+http://www.yourdomain.com)'\n\nROBOTSTXT_OBEY = False\n\nCOOKIES_ENABLED = True\n\nDOWNLOAD_DELAY = 1\nCONCURRENT_REQUESTS_PER_DOMAIN = 1\n\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10'\n\n#LOG_FILE = 'log_scrape.log'\n#LOG_LEVEL = 'INFO'\n\nITEM_PIPELINES = {\n 'scraper.pipelines.ConvertData': 300\n}\n","sub_path":"audience/twitter-scraping/lib/scraper/scraper/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"110121914","text":"#!/usr/bin/python3\n#coding=utf-8\n\nimport os\nimport sys\n\nline_num=sys.argv[1]\nfile_name=sys.argv[2]\n\ns_l=('^','$','`','\\t')\ne_l=\"sed -i \"\n\nfor i in range(0,len(s_l)):\n e_l=e_l+\" -e '\"+line_num+\"s/\"+s_l[i]+\"/|/g' \"\n\nos.system(e_l+file_name)\n\n#print(\"sed -i \" + e_l + file_name)\n\n#\"-e \"+line_num+\"s/$/|/g\"","sub_path":"python/dotmode_to_table.py","file_name":"dotmode_to_table.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"336180929","text":"# new dictionary designed to store user info on website\r\n\r\nuser_0 = {\r\n\t'username': 'efermi',\r\n\t'first': 'enrico',\r\n\t'last': 'fermi',\r\n\t}\r\n\r\n# you can access any single piece of inforation about user\r\n# so we could use a loop to get it \r\n\r\nfor key, value in user_0.items():\r\n\tprint(\"\\nKey: \" + key)\r\n\tprint(\"Value: \" + value)\r\n\r\n# as shown in line 12, we create names for the two variables that will\r\n# hold the key and vaue in each key-value pair\r\n# can use appreviations with tit too \r\n\r\n# it also includes the dictionary name as well as the method items()\r\n# which returns a list of key-valued pairs.\r\n\r\n# the for loop then stores each of these pairs in the two variables \r\n\r\n# notice that the key-value pairs are not returned in the order in\r\n# which they are stored, even when looping through the dictionary\r\n\r\n# now we use it in fav languages loop\r\n","sub_path":"user_0.py","file_name":"user_0.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"519124772","text":"from flask_restplus import Namespace, fields\n\napi = Namespace('Hotels', 'Hotel Related APIs', path='/hotels')\n\na_hotel_room = api.model('Room Details', \n {'type': fields.String(),\n 'capacity': fields.String()\n })\n\na_hotel_check_date = api.model('Check Dates', \n {'in': fields.Date(), \n 'out': fields.Date()\n })\n\na_hotel_timestamp = api.model('Timestamp', \n {'dateCreated': fields.DateTime(),\n 'dateUpdated': fields.DateTime()\n })\n\na_hotel_details = api.model('Hotel Details', \n {'id': fields.Integer(),\n 'name': fields.String(),\n 'room': fields.Nested(a_hotel_room),\n 'details': fields.String(),\n 'checkDates': fields.Nested(a_hotel_check_date),\n 'price': fields.Float(),\n 'expirationDate': fields.Date(),\n 'isExpired': fields.Boolean(),\n 'isPackaged': fields.Boolean(),\n 'remainingRooms': fields.Integer(),\n 'timestamp': fields.Nested(a_hotel_timestamp)\n })\n\na_create_hotel = api.model('Create Hotel',\n {'name': fields.String(),\n 'room': fields.Nested(a_hotel_room),\n 'details': fields.String(),\n 'checkDates': fields.Nested(a_hotel_check_date),\n 'price': fields.Float(),\n 'expirationDate': fields.Date(),\n 'isPackaged': fields.Boolean(),\n 'remainingRooms': fields.Integer(),\n })\n\na_approve_hotel = api.model('Approve Hotel',\n {'isApproved': fields.Boolean()})\n","sub_path":"app/api/models/hotel.py","file_name":"hotel.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"495738740","text":"class Solution(object):\n\n def get_area_of_island(self, i, j):\n x_dir = [-1, 0, 0, 1]\n y_dir = [0, -1, 1, 0]\n area = 0\n\n positions_to_search = [(i, j)]\n while positions_to_search:\n x, y = positions_to_search.pop()\n\n if self.grid[x][y] == 1:\n area += 1\n self.grid[x][y] = 0\n\n for x_change, y_change in zip(x_dir, y_dir):\n new_x = x + x_change\n new_y = y + y_change\n if (0 <= new_x < len(self.grid) and\n 0 <= new_y < len(self.grid[0]) and\n self.grid[new_x][new_y] == 1):\n positions_to_search.append((new_x, new_y))\n\n return area\n\n def maxAreaOfIsland(self, grid):\n if len(grid) == 0 or len(grid[0]) == 0:\n return 0\n\n self.grid = grid\n area = 0\n for i in range(len(self.grid)):\n for j in range(len(self.grid[0])):\n if self.grid[i][j] == 1:\n area = max(area, self.get_area_of_island(i, j))\n\n return area\n\n\nclass TestSolution:\n\n def __init__(self):\n self.solution = Solution()\n\n def test_no_islands(self):\n grid = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n\n expected = 0\n actual = self.solution.maxAreaOfIsland(grid)\n\n assert actual == expected\n\n def test_leetcode_example_1(self):\n grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],\n [0,0,0,0,0,0,0,1,1,1,0,0,0],\n [0,1,1,0,1,0,0,0,0,0,0,0,0],\n [0,1,0,0,1,1,0,0,1,0,1,0,0],\n [0,1,0,0,1,1,0,0,1,1,1,0,0],\n [0,0,0,0,0,0,0,0,0,0,1,0,0],\n [0,0,0,0,0,0,0,1,1,1,0,0,0],\n [0,0,0,0,0,0,0,1,1,0,0,0,0]]\n\n expected = 6\n actual = self.solution.maxAreaOfIsland(grid)\n\n assert actual == expected\n\n def test_leetcode_example_2(self):\n grid = [[0,0,0,0,0,0,0,0]]\n\n expected = 0\n actual = self.solution.maxAreaOfIsland(grid)\n\n assert actual == expected\n\n def test_leetcode_example_3(self):\n grid = [[1,1,0,0,0],\n [1,1,0,0,0],\n [0,0,0,1,1],\n [0,0,0,1,1]]\n\n expected = 4\n actual = self.solution.maxAreaOfIsland(grid)\n\n assert actual == expected\n\n def run_test(self, test, test_name):\n print('Running {}...'.format(test_name))\n\n try:\n test()\n except:\n print('Failed {}'.format(test_name))\n else:\n print('Passed {}!'.format(test_name))\n\n def run_tests(self):\n self.run_test(self.test_no_islands, 'test_no_islands')\n self.run_test(self.test_leetcode_example_1, 'test_leetcode_example_1')\n self.run_test(self.test_leetcode_example_2, 'test_leetcode_example_2')\n self.run_test(self.test_leetcode_example_3, 'test_leetcode_example_3')\n\n\ntester = TestSolution()\ntester.run_tests()\n","sub_path":"695_max_area_of_island.py","file_name":"695_max_area_of_island.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"621129861","text":"#!/usr/bin/python\nimport json\nmydict ={}\nmydict['present_stimuli']=10\nmydict['type_stimulus']='letters'\nmydict['max_nback']=5\nmydict['num_stimuli']=5\nmydict['num_high_scores']=10\nfilename='game_spec.txt'\nwith open(filename,'w') as f:\n json.dump(mydict,f)\n \n","sub_path":".buildozer/android/app/generate_input_file.py","file_name":"generate_input_file.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"276413237","text":"# Roshni Surpur\n# November 16,2018\n# on my honor, I have neither given nor recieved unauthorized aid\n# http://interactivepython.org/runestone/static/pythonds/SortSearch/TheSelectionSort.html Accessed November 16,2018\n# I also used the code from the demo in class\n# This code is for the card game rummy.\n\n# import card class from separate file, and random shuffle module\nfrom card import Card\nfrom random import shuffle\nimport random\n# deck class keeps track of a deck of cards\n# this can be a full deck of cards, or a player's hand, for example\nclass Deck:\n\t# sets up full shuffled deck of cards by default\n\t# if 0 (or any positive number) is passed, an empty deck will be created\n\tdef __init__(self, full_deck=-1):\n\t\tif full_deck == -1:\n\t\t\tsuits = [\"♤\",\"♡\",\"♧\",\"♢\"]\n\t\t\tself.cards = [Card(rank, suit) for rank in range(1, 14) for suit in suits]\n\t\t\t# shuffle by default; remove if you want manual shuffling to occur\n\t\t\tself.shuffle()\n\t\telse:\n\t\t\tself.cards = []\n\n\t# uses the random module's shuffle method to shuffle cards\n\tdef shuffle(self):\n\t\tshuffle(self.cards)\n# sorts card\n\tdef sort(self):\n\t\tself.cards.sort(key=lambda x: str(x.rank), reverse=True)\n\t\t# print(\"Cards:\",self.__str__())\n# sorts card\n\tdef selectionSort(self):\n\t\tfor fillslot in range(len(self.cards)-1,0,-1):\n\t\t\tpositionOfMax=0\n\t\t\tfor location in range(1,fillslot+1):\n\t\t\t\tif int(self.cards[location].rank)>int(self.cards[positionOfMax].rank):\n\t\t\t\t\tpositionOfMax = location\n\n\t\t\ttemp = self.cards[fillslot]\n\t\t\tself.cards[fillslot] = self.cards[positionOfMax]\n\t\t\tself.cards[positionOfMax] = temp\n\n\t# deals a single card from the list\n\t# the default is the first card in the list\n\t# if a number is passed, the card at that location will be dealt\n\t# error checking would be nice, to prevent dealing cards at positions that don't exist,\n\t# or to prevent dealing a card when no cards are left in the hand\n\tdef deal(self, position=-1):\n\t\t# if len(self.cards)==0 or position==len(self.cards):\n\t\t# \treturn False\n\t\tif position==-1 or position>len(self.cards):\n\t\t# a.pop() removes and returns the item in the list\n\t\t\treturn self.cards.pop(0) #removes first item in the list which has been dealt\n\t\telse:\n\t\t\treturn self.cards.pop(position)\n\n\t# add a card to the deck\n\tdef add_card(self, card):\n\t\tself.cards.append(card)\n\n\tdef num_cards(self):\n\t\treturn len(self.cards)\n\t# returns the number of cards currently in the deck\n\t# helper function potentially for deal\n\tdef show_card(self, position=-1):\n\t\tif position==-1: #error checking?\n\t\t# a.pop() removes and returns the item in the list\n\t\t\tif len(self.cards)==0:\n\t\t\t\treturn False #checks if there are no more cards\n\t\t\treturn self.cards[0] #removes first item in the list which has been dealt\n\t\telse:\n\t\t\treturn self.cards[position]\n\n\t# checks to see if a specific card is in the deck\n\tdef contains(self, card):\n\t\tfor i in self.cards:\n\t\t\tif card.rank == i.rank and card.suit == i.suit:\n\t\t\t\treturn True\n\t\treturn False\n\tdef compare_ranks(self,position=-1):\n\t\t# checking if all the ranks are equal\n\t\tif self.cards[position].rank == self.cards[position+1].rank== self.cards[position+2].rank== self.cards[position+3].rank:\n\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef compare_rankssuits(self,position=0):\n\t\tplayer_deck.sort()\n\t\tif self.cards[position].suit == 
self.cards[position+1].suit== self.cards[position+2].suit== self.cards[position+3].suit:\n\t\t\t# print(\"yes\")\n\t# if all suits match and the ranks form a consecutive descending run (the deck is sorted first)\n\t\t\tif self.cards[position].rank-1 == self.cards[position+1].rank and self.cards[position].rank-2== self.cards[position+2].rank and self.cards[position].rank-3== self.cards[position+3].rank:\n\t\t\t\treturn True\n\t\treturn False\n\n\t# for printing out all the cards in the deck in a nice way\n\tdef __str__(self):\n\t\tresult = ''\n\t\tcount = 1\n\t\tfor i in self.cards:\n\t\t\tresult += \"[card \"+str(count)+\"] \"+str(i)+\"\\n\"\n\t\t\tcount=count+1\n\t\treturn result\n\n\t__repr__ = __str__\n\n\n# remove these when importing deck; for error checking only\ndealer_deck = Deck() #full deck\nplayer_deck = Deck(0) #empty deck\nremoved_cards = Deck(0) #empty deck\n# checking_deck=Deck(0)\n\n#\nfor i in range(4): #the range number is the number of cards dealt to the player\n\tplayer_deck.add_card(dealer_deck.deal()) #add the card that the dealer deals\n\n\n\n# now the user can choose to pick up a random card or pick up a card selected by computer\n# if they choose a card from computer\ndef computercard():\n\tplayer_deck.add_card(dealer_deck.deal()) #add the card that the dealer deals\n\n# if they choose a random card\ndef randomcard():\n\tprint(\"\\n***A card has been added to your hand***\")\n\n\tplayer_deck.add_card(dealer_deck.deal(random.randint(0,dealer_deck.num_cards()))) #add the card that the dealer deals\n\tplayer_deck.selectionSort()\n# user chooses which of the above options\ndef userchoice():\n\tusernum=input((\"Type 1 to choose the following card:\"+str(dealer_deck.show_card())+\" Type 2 to choose a random card.\\n\"))\n\t# print(dealer_deck.show_card())\n\tif usernum==\"1\":\n\t\tcomputercard()\n\t\tremovecard()\n\telif usernum==\"2\":\n\t\trandomcard()\n\t\tremovecard()\n\telse:\n\t\tprint(\"I did not understand your answer. Try Again:\")\n\t\tuserchoice()\n\tprint(\"\\nYour Hand:\")\n\tplayer_deck.selectionSort()\n\tprint(player_deck)\n\ndef removecard_fromuser(cardtoremove):\n\tremoved_cards.add_card(player_deck.deal(cardtoremove-1))\n\ndef removecard():\n\tremoved_cards.add_card(dealer_deck.deal())\n\n\nprint(\"\\nINSTRUCTIONS: This is the game rummy. The goal of this game is to get a 4-card sequence before you run out of cards.\\n There are two types of sequences:\")\nprint(\"1: All the ranks are equal. Example: 4♠,4♥,4♣,4♦ or Kings♠,Kings♥,Kings♣,Kings♦ \")\nprint(\"2: The suits are the same and the ranks are in ascending or descending order. Example: 5♠,6♠,7♠,8♠ or Kings♦,Queens♦,Jack♦,10♦\")\nprint(\"Rule: You are only allowed to have 4 cards at a time, so every time a card is added to your hand, you must also remove a card from your hand.\\n You have two options to add a card to your hand:\\n1. You can take the card the computer gives you. \\n2. 
You can pick up a random card.\nYou may not reuse cards\")\nplayer_deck.selectionSort()\nprint(\"\\nYOUR HAND\")\nprint(player_deck)\n\n\nwhile True:\n\tif dealer_deck.num_cards()==0: # a Deck instance never equals False, so check the remaining count\n\t\tprint(\"You have run out of cards!\")\n\t\tbreak\n\tuserchoice()\n\tcardtoremove=int(input(\"What is the number of the card you would like to remove?\"))\n\tremovecard_fromuser(cardtoremove)\n\tprint(\"\\nYour Hand:\")\n\tplayer_deck.selectionSort()\n\tprint(player_deck)\n\tif player_deck.compare_ranks()==True:\n\t\tprint(\"You have won!\")\n\t\tbreak\n\tif player_deck.compare_rankssuits()==True:\n\t\tprint(\"You have won!\")\n\t\tbreak\n\tif dealer_deck.show_card()==False:\n\t\tprint(\"There are no more cards to deal! You have lost\")\n\t\tbreak\n","sub_path":"classes/demodeck.py","file_name":"demodeck.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
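A small sanity check of the rank-sequence detection, assuming the same card.Card constructor the module already imports (illustrative only; as written, the module starts its interactive game loop on import):

test_deck = Deck(0)
for suit in ["♤", "♡", "♧", "♢"]:
    test_deck.add_card(Card(7, suit))
print(test_deck.compare_ranks())  # True: all four cards share rank 7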
+{"seq_id":"99293293","text":"class animal:\r\n def __init__(self,nombre):\r\n self.nombre = nombre\r\n self.patas = 4\r\n self.tipo = \"Can\"\r\n\r\nperro = animal(\"Kei\")\r\ngato = animal(\"Igna\")\r\n\r\nprint(perro.nombre,perro.patas,perro.tipo)\r\nprint(gato.nombre,gato.patas,gato.tipo)","sub_path":"3 - Programacion orientada a objetos/12POO/prueba_init.py","file_name":"prueba_init.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"506806713","text":"# Definition for a binary tree node.\nclass TreeNode:\n val = 0\n left = None\n right = None\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n if root == None:\n return True\n isValid, _, _ = self.isValidSubBST(root)\n return isValid\n \n def isValidSubBST(self, root: TreeNode) -> [bool, int, int]:\n min = root.val\n max = root.val\n if root.left != None:\n isValid, sub_min, sub_max = self.isValidSubBST(root.left)\n if isValid and sub_max < root.val:\n min = sub_min\n else:\n return False, -1, -1\n if root.right != None:\n isValid, sub_min, sub_max = self.isValidSubBST(root.right)\n if isValid and sub_min > root.val:\n max = sub_max\n else:\n return False, -1, -1\n return True, min, max\n \n","sub_path":"normal/isValidBST.py","file_name":"isValidBST.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"48609267","text":"import re # module for processing regular expressions https://docs.python.org/3/library/re.html\n\n# Initial prompt to user\nline = input(\"Enter a phone number to validate or 'exit' when done. \")\n\n# Define your regex\nphone_regex = r'([(]*[0-9]{3}[)]*)[-\\s]?([0-9]{3})[-\\s]?([0-9]*)'\n\nwhile line != \"exit\":\n # Find matches\n phone_numbers = re.findall(phone_regex, line)\n\n # If no match found, print that no number was found\n if not phone_numbers:\n print('Phone number not valid')\n\n # Else, break number up into area code, prefix, and suffic\n else:\n phone_number = phone_numbers[0]\n print(\n f'area code: {phone_number[0]}, prexif: {phone_number[1]}, suffix: {phone_number[2]}')\n\n # As a stretch goal, you can modify your regex to search for country codes\n # too and print that out as well!\n\n # Done validating, read in a new line\n line = input(\"Enter a phone number to validate or 'exit' when done. \")\n","sub_path":"legacy/projects/state-mach-regex/phone/phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"268763075","text":"from main.models import Pass,Camera,Road\nfrom main.api.serializers import PassSerializer,CameraSerializer,RoadSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.db.models import Max\nimport geopy.distance\nimport sys\n\nclass GetCuLocation(APIView):\n\n def post(self,request,format = None):\n plate_char = request.POST[\"plate_char\"]\n plate_num = request.POST[\"plate_num\"]\n year = int(request.POST[\"year\"])\n month = int(request.POST[\"month\"])\n day = int(request.POST[\"day\"])\n args = Pass.objects.filter(plate_char = plate_char,plate_num = plate_num)# or whatever arbitrary queryset\n max = 0\n l = None\n for i in args:\n if int(i.camera.sequence) > max and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n max = int(i.camera.sequence)\n l = i.camera\n\n if l is not None:\n serializer = CameraSerializer(l)\n return Response(serializer.data)\n else :\n return Response(\"The car is not in bounds\")\n\nclass GetLocations(APIView):\n\n def post(self,request,format = None):\n plate_char = request.POST[\"plate_char\"]\n plate_num = request.POST[\"plate_num\"]\n hour = int(request.POST[\"hour\"])\n minute = int(request.POST[\"minute\"])\n year = int(request.POST[\"year\"])\n month = int(request.POST[\"month\"])\n day = int(request.POST[\"day\"])\n\n args = Pass.objects.filter(plate_char = plate_char,plate_num = plate_num)# or whatever arbitrary queryset\n temp = list()\n for i in args:\n if int(i.hour) < hour and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n temp.append(i)\n elif int(i.hour) == hour and int(i.minute) <= minute and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n temp.append(i)\n\n if len(temp) != 0:\n serializer = PassSerializer(temp,many=True)\n return Response(serializer.data)\n else :\n return Response(\"The car is not in bounds\")\n\nclass GetNumCar(APIView):\n\n def post(self,request,format = None):\n seq = request.POST[\"sequence\"]\n rid = request.POST[\"roadId\"]\n cam_id = Camera.objects.filter(sequence=seq,road_id=rid).first().cam_id\n h = int(request.POST[\"hour\"])\n m = int(request.POST[\"minute\"])\n t = int(request.POST[\"period\"])\n year = int(request.POST[\"year\"])\n month = int(request.POST[\"month\"])\n day = int(request.POST[\"day\"])\n tm = None\n p = Pass.objects.filter(camera_id=cam_id)\n temp = list()\n\n if m >= t:\n tm = m - t\n for i in p:\n if int(i.hour) == h and int(i.minute) <= m and int(i.minute) >= tm and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n temp.append(i)\n else:\n tm = 60 - (int(t) - int(m))\n for i in p:\n if int(i.hour) == h and int(i.minute) <= m and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n temp.append(i)\n elif int(i.hour) == h - 1 and int(i.minute) >= tm and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n temp.append(i)\n\n if len(temp) != 0:\n return Response(len(temp))\n else :\n return Response(\"The car is not in bounds\")\n\nclass GetSpeedCar(APIView):\n\n def post(self,request,format = None):\n plate_char = request.POST[\"plate_char\"]\n plate_num = request.POST[\"plate_num\"]\n hour = int(request.POST[\"hour\"])\n minute = int(request.POST[\"minute\"])\n year = int(request.POST[\"year\"])\n month = int(request.POST[\"month\"])\n day = int(request.POST[\"day\"])\n args = Pass.objects.filter(plate_char = plate_char,plate_num = plate_num)\n max = 0\n 
min = sys.maxsize\n ma = None\n mi = None\n for i in args:\n if int(i.hour) < hour and int(i.camera.sequence) > max and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n max = int(i.camera.sequence)\n ma = i\n elif int(i.camera.sequence) > max and int(i.hour) == hour and int(i.minute) <= minute and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n max = int(i.camera.sequence)\n ma = i\n if int(i.hour) < hour and int(i.camera.sequence) < min and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n min = int(i.camera.sequence)\n mi = i\n elif int(i.camera.sequence) < min and int(i.hour) == hour and int(i.minute) <= minute and int(i.day) == day and int(i.year) == year and int(i.month) == month:\n min = int(i.camera.sequence)\n mi = i\n if ma is None or mi is None:\n return Response(\"There is no data!\")\n\n coords_1 = (ma.latitude,ma.longitude)\n coords_2 = (mi.latitude,mi.longitude)\n d = geopy.distance.vincenty(coords_1, coords_2).km\n t = int(ma.hour) - int(mi.hour) # hour fields are stored as strings\n if t == 0:\n t = 1\n if d == 0:\n return Response(\"There is not enough data!\")\n ans = d/t\n return Response(ans)\n\nclass CreateCamera(APIView):\n def post(self, request, format= None):\n serializer = CameraSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n cid = request.POST[\"cam_id\"]\n rid = request.POST[\"roadID\"]\n c = Camera.objects.filter(cam_id = cid).first()\n c.road = Road.objects.filter(road_id = rid).first()\n c.save()\n return Response(\"The camera is registered successfully!\", status=status.HTTP_200_OK)\n return Response(serializer.errors,status=status.HTTP_200_OK)\n\nclass UpdateCamera(APIView):\n def post(self, request, format= None):\n cid = request.POST[\"cam_id\"]\n rid = request.POST[\"roadID\"]\n la = request.POST[\"latitude\"]\n lo = request.POST[\"longitude\"]\n pr = request.POST[\"province\"]\n seq = request.POST[\"sequence\"]\n c = Camera.objects.filter(cam_id = cid).first()\n c.road = Road.objects.filter(road_id = rid).first()\n c.longitude = lo\n c.latitude = la\n c.province = pr\n c.sequence = seq\n c.save()\n return Response(\"The camera is updated successfully!\", status=status.HTTP_200_OK)\n\nclass CreateRoad(APIView):\n def post(self, request, format= None):\n serializer = RoadSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(\"The road is registered successfully!\", status=status.HTTP_200_OK)\n return Response(serializer.errors,status=status.HTTP_200_OK)\n\nclass GetRoads(APIView):\n\n def get(self, request, format= None):\n r = Road.objects.all()\n serializer = RoadSerializer(r,many=True)\n return Response(serializer.data)\n\nclass GetCameras(APIView):\n def post(self, request, format= None):\n rid = request.POST[\"roadID\"]\n r = Camera.objects.filter(road_id=rid)\n serializer = CameraSerializer(r,many=True)\n return Response(serializer.data)\n","sub_path":"main/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
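One portability note on GetSpeedCar: geopy removed vincenty in its 2.0 release, and geodesic is the drop-in replacement on current versions (coordinates below are sample values, not from the project):

import geopy.distance

coords_1 = (25.2048, 55.2708)  # sample points in the Dubai area
coords_2 = (25.0657, 55.1713)
d = geopy.distance.geodesic(coords_1, coords_2).km  # replaces the removed vincenty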
+{"seq_id":"185251116","text":"# -*- coding: utf-8 -*-\n#######\n# actinia-core - an open source REST API for scalable, distributed, high\n# performance processing of geographical data that uses GRASS GIS for\n# computational tasks. For details, see https://actinia.mundialis.de/\n#\n# Copyright (c) 2016-2018 Sören Gebbert and mundialis GmbH & Co. KG\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n#######\n\n\"\"\"\nModule management to parser GRASS xml response\n\n\"\"\"\nimport json\nimport xmltodict\n\nfrom actinia_gdi.model.gmodules import Module\nfrom actinia_gdi.model.gmodules import ModuleParameter, ModuleParameterSchema\nfrom actinia_gdi.resources.logging import log\nfrom actinia_gdi.resources.templating import tplEnv\n\n\n__license__ = \"GPLv3\"\n__author__ = \"Carmen Tawalika\"\n__copyright__ = \"Copyright 2019, mundialis\"\n__maintainer__ = \"Carmen Tawalika\"\n\n\ndef logstring(module_id, param, key):\n log.debug(module_id + \" \" + param + \" has no key \" + key)\n\n\ndef setParameterKey(module_id, parameter):\n try:\n key = parameter['@name']\n except KeyError:\n key = None\n logstring(module_id, key, \"name\")\n\n return key\n\n\ndef setVirtualParameterKey(module_id, parameter):\n try:\n key = module_id + '_' + parameter['@name']\n except KeyError:\n key = None\n logstring(module_id, key, \"name\")\n\n return key\n\n\ndef setParameterDescription(module_id, key, parameter, kwargs):\n param_descr = \"\"\n\n try:\n param_descr = parameter['label']\n kwargs['description'] = param_descr\n except KeyError:\n # logstring(module_id, key, \"label\")\n pass\n try:\n param_descr += '. 
' + parameter['description']\n kwargs['description'] = param_descr\n except KeyError:\n logstring(module_id, key, \"description\")\n\n return kwargs\n\n\ndef setParameterRequired(parameter, kwargs):\n try:\n required = parameter['@required']\n if required == 'yes':\n kwargs['required'] = True\n else:\n kwargs['required'] = False\n except KeyError:\n required = \"False\"\n\n return kwargs\n\n\ndef setParamType(module_id, key, parameter, schema_kwargs):\n try:\n # grass parameter types can only be string, double or integer\n gtype = parameter['@type']\n if gtype in ('float', 'double'):\n gtype = 'number'\n schema_kwargs['type'] = gtype\n except KeyError:\n logstring(module_id, key, \"type\")\n try:\n multiple = parameter['@multiple']\n if multiple == 'yes':\n gtype = 'array'\n schema_kwargs['type'] = gtype\n except KeyError:\n logstring(module_id, key, \"multiple\")\n try:\n for subtype_key, val in parameter['gisprompt'].items():\n if subtype_key == \"@element\":\n schema_kwargs['subtype'] = val\n except Exception:\n pass\n\n return schema_kwargs\n\n\ndef setParameterDefault(parameter, schema_kwargs):\n try:\n schema_kwargs['default'] = parameter['default']\n if parameter['default'] is None:\n schema_kwargs['default'] = ''\n except KeyError:\n pass\n\n return schema_kwargs\n\n\ndef setParameterEnum(parameter, schema_kwargs):\n try:\n enum = []\n for enum_key, val in parameter['values'].items():\n for item in val:\n for i in item:\n if i == \"name\":\n enum.append(item[i])\n if len(enum) > 0:\n schema_kwargs['enum'] = enum\n except KeyError:\n pass\n\n return schema_kwargs\n\n\ndef isOutput(parameter):\n \"\"\" Checks if parameter is output parameter.\n Returns True if parameter has key\n 'gisprompt.age' == 'new',\n False otherwise.\n \"\"\"\n try:\n if '@age' in parameter['gisprompt'].keys():\n return (parameter['gisprompt']['@age'] == 'new')\n else:\n return False\n except KeyError:\n return False\n\n\ndef ParseInterfaceDescription(xml_string, keys=None):\n \"\"\"Parses output of GRASS interface-description\n and returns openEO process object\n \"\"\"\n\n gm_dict = xmltodict.parse(xml_string)['task']\n\n module_id = gm_dict['@name']\n description = gm_dict['description']\n categories = gm_dict['keywords'].replace(' ', '').split(',')\n categories.append('grass-module')\n parameters = {}\n returns = {}\n extrakwargs = dict()\n\n try:\n grass_params = gm_dict['parameter']\n except KeyError:\n logstring(module_id, \"\", \"has no parameter\")\n grass_params = []\n\n try:\n flags = gm_dict['flag']\n except KeyError:\n logstring(module_id, \"\", \"has no flags\")\n flags = []\n\n for parameter in grass_params:\n\n kwargs = dict()\n schema_kwargs = dict()\n\n if keys:\n # case for actinia modules\n key = setVirtualParameterKey(module_id, parameter)\n if key not in keys:\n continue\n else:\n # case for GRASS modules\n key = setParameterKey(module_id, parameter)\n\n schema_kwargs = setParamType(module_id, key, parameter, schema_kwargs)\n kwargs = setParameterDescription(module_id, key, parameter, kwargs)\n kwargs = setParameterRequired(parameter, kwargs)\n schema_kwargs = setParameterDefault(parameter, schema_kwargs)\n schema_kwargs = setParameterEnum(parameter, schema_kwargs)\n\n param_object = ModuleParameter(\n **kwargs,\n schema=ModuleParameterSchema(**schema_kwargs)\n )\n if isOutput(parameter):\n returns[key] = param_object\n else:\n parameters[key] = param_object\n del kwargs\n del schema_kwargs\n\n for parameter in flags:\n # not possible to specify flag values via template at the moment\n if 
keys:\n continue\n\n kwargs = dict()\n schema_kwargs = dict()\n schema_kwargs['type'] = 'boolean'\n schema_kwargs['default'] = 'False'\n\n key = setParameterKey(module_id, parameter)\n\n kwargs = setParameterDescription(module_id, key, parameter, kwargs)\n kwargs = setParameterRequired(parameter, kwargs)\n\n param_object = ModuleParameter(\n **kwargs,\n schema=ModuleParameterSchema(**schema_kwargs)\n )\n parameters[key] = param_object\n del kwargs\n del schema_kwargs\n\n # custom extention for importer + exporter from actinia_core\n try:\n tpl = tplEnv.get_template('gmodules/' + module_id + '.json')\n pc_template = json.loads(tpl.render().replace('\\n', ''))\n for key in [*pc_template]:\n extrakwargs[key] = {}\n for param in pc_template[key]:\n extrakwargs[key][param] = ModuleParameter(**pc_template[key][param])\n except Exception as e:\n # if no template for module exist, use as is (default)\n log.debug('template %s does not exist.', e)\n\n grass_module = Module(\n id=module_id,\n description=description,\n categories=sorted(categories),\n parameters=parameters,\n returns=returns,\n **extrakwargs\n )\n\n return grass_module\n","sub_path":"actinia_gdi/core/gmodulesParser.py","file_name":"gmodulesParser.py","file_ext":"py","file_size_in_byte":7694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
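For orientation, this is the shape xmltodict.parse hands to ParseInterfaceDescription, shown on a stripped-down sample (illustrative XML, not real GRASS output):

import xmltodict

xml = ('<task name="r.example">'
       '<description>Example module.</description>'
       '<keywords>raster, terrain</keywords>'
       '</task>')
gm_dict = xmltodict.parse(xml)['task']
print(gm_dict['@name'])     # r.example -- attributes are exposed with an @ prefix
print(gm_dict['keywords'])  # raster, terrain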
+{"seq_id":"84136835","text":"from OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nimport serial\nimport os\nimport threading\nimport wx\nimport math\nimport sys\nfrom wx import glcanvas as glc\n\nESCAPE = '\\033'\n\nwindow = 0\n\n# rotation\nX_AXIS = 0.0\nY_AXIS = 0.0\nZ_AXIS = 0.0\n\nDIRECTION = 1\n\nhapticBallPlat = [10.0, 1.0, 5.0]\n\nbackground_color = [0.706, .706, 0.718]\n\nSCENE_DEPTH = -20.0\n\nSCENE_HEIGHT = -2\n\nrot_x = 0\nrot_y = 0\n\nvel_x = 0\nvel_z = 0\n\nballPosition = [0.0, 0.0, 0.0]\n\ndef InitGL(Width, Height):\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glDepthFunc(GL_LESS)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(Width) / float(Height), 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n glClearColor(background_color[0],\n background_color[1],\n background_color[2],\n 0.0)\n\n\ndef keyPressed(*args):\n if args[0] == ESCAPE:\n sys.exit()\n\n\ndef DrawGLScene():\n global X_AXIS, Y_AXIS, Z_AXIS\n global DIRECTION\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n glLoadIdentity()\n glTranslatef(0.0, SCENE_HEIGHT, SCENE_DEPTH)\n\n glRotatef(X_AXIS, 1.0, 0.0, 0.0)\n glRotatef(Y_AXIS, 0.0, 1.0, 0.0)\n glRotatef(Z_AXIS, 0.0, 0.0, 1.0)\n\n # drawPlatform()\n drawBall()\n\n posx, posy = wx.GetMousePosition()\n width, height = wx.DisplaySize()\n Z_AXIS, X_AXIS = getPlatformRotation(posx, posy, width, height, 30)\n\n #X_AXIS = X_AXIS - 0.10\n #Z_AXIS = Z_AXIS - 0.10\n glutSwapBuffers()\ndef drawBall():\n global ballPosition\n color = [0, 0, 0]\n opacity = 1.0\n BALL_X, BALL_Y, BALL_Z = calculateBallPos()\n glLoadIdentity()\n glColor4f(color[0], color[1], color[2], opacity)\n #glPushMatrix()\n glTranslatef(BALL_X, BALL_Y+SCENE_HEIGHT, BALL_Z+SCENE_DEPTH)\n ballPosition = [BALL_X, BALL_Y, BALL_Z]\n glutSolidSphere(0.4, 16, 16)\n #glPopMatrix()\n\ndef calculateBallPos():\n global vel_x\n global vel_z\n global BALL_X\n global BALL_Y\n global BALL_Z\n vel_x += .01 * rot_x\n vel_z += .01 * rot_y\n if not ballPosition[0] - (vel_x * .001) >= 3 and not ballPosition[0] - (vel_x * .001) <= -3:\n BALL_X = ballPosition[0] - (vel_x * .001)\n elif ballPosition[0] - (vel_x * .001) > 3:\n BALL_X = 3\n elif ballPosition[0] - (vel_x * .001) < -3:\n BALL_X = -3\n BALL_Y = ballPosition[1]\n if not ballPosition[2] - (vel_z * .001) >= 5 and not ballPosition[2] - (vel_z * .001) <= -5:\n BALL_Z = ballPosition[2] - (vel_z * .001)\n elif ballPosition[0] - (vel_x * .001) > 5:\n BALL_Z = 5\n elif ballPosition[0] - (vel_x * .001) < -5:\n BALL_Z = -5\n\n return BALL_X, BALL_Y, BALL_Z\n\ndef getPlatformRotation(posx, posy, width, height, limit):\n \"\"\"\n @function getPlatformRotation\n Given the coordinates of the mouse, this function calculates the angle at which the haptic ball\n platform should be drawm\n @param posx the position x of the mouse coordinates\n @param width the width of the display in pixels\n @param limit the limit of rotation for the platform in degrees\n @return platRotation the rotation of the platform in degrees\n \"\"\"\n limiter_x = (float(width) / 2) / limit # calculates a divisor for the rotation calculation based on a supplied angle\n limiter_y = (float(height) / 2) / limit\n plat_rot_x = (((float(width) / 2) - posx)) / limiter_x\n global rot_x\n rot_x = plat_rot_x\n plat_rot_y = (-((float(height) / 2) - posy)) / limiter_y\n global rot_y\n rot_y = plat_rot_y\n # gets the difference between the middle and coordinate and then divides by a number to convert to 
a degree\n return plat_rot_x, plat_rot_y\n\n\ndef drawCuboid(color, opacity, left_lower_front, left_upper_front, right_lower_front, right_upper_front,\n left_lower_back,\n left_upper_back, right_lower_back, right_upper_back, side_color):\n # GL.glLoadIdentity()\n glBegin(GL_QUADS)\n # top face\n glColor4f(color[0], color[1], color[2], opacity) # color\n glVertex3f(left_upper_back[0], left_upper_back[1], left_upper_back[2])\n glVertex3f(left_upper_front[0], left_upper_front[1], left_upper_front[2])\n glVertex3f(right_upper_front[0], right_upper_front[1], right_upper_front[2])\n glVertex3f(right_upper_back[0], right_upper_back[1], right_upper_back[2])\n # GL.glEnd()\n\n # bottom face\n glColor4f(color[0], color[1], color[2], opacity) # color\n glVertex3f(left_lower_back[0], left_lower_back[1], left_lower_back[2])\n glVertex3f(left_lower_front[0], left_lower_front[1], left_lower_front[2])\n glVertex3f(right_lower_front[0], right_lower_front[1], right_lower_front[2])\n glVertex3f(right_lower_back[0], right_lower_back[1], right_lower_back[2])\n\n # front face\n glColor4f(side_color[0], side_color[1], side_color[2], opacity) # color\n glVertex3f(left_upper_front[0], left_upper_front[1], left_upper_front[2])\n glVertex3f(left_lower_front[0], left_lower_front[1], left_lower_front[2])\n glVertex3f(right_lower_front[0], right_lower_front[1], right_lower_front[2])\n glVertex3f(right_upper_front[0], right_upper_front[1], right_upper_front[2])\n\n # back face\n glColor4f(side_color[0], side_color[1], side_color[2], opacity) # color\n glVertex3f(left_lower_back[0], left_lower_back[1], left_lower_back[2])\n glVertex3f(left_upper_back[0], left_upper_back[1], left_upper_back[2])\n glVertex3f(right_upper_back[0], right_upper_back[1], right_upper_back[2])\n glVertex3f(right_lower_back[0], right_lower_back[1], right_lower_back[2])\n\n # left face\n # GL.glBegin(GL.GL_QUADS)\n glColor4f(side_color[0], side_color[1], side_color[2], opacity) # color\n glVertex3f(left_lower_back[0], left_lower_back[1], left_lower_back[2])\n glVertex3f(left_upper_back[0], left_upper_back[1], left_upper_back[2])\n glVertex3f(left_upper_front[0], left_upper_front[1], left_upper_front[2])\n glVertex3f(left_lower_front[0], left_lower_front[1], left_lower_front[2])\n\n # right face\n # GL.glBegin(GL.GL_QUADS)\n glColor4f(side_color[0], side_color[1], side_color[2], opacity) # color\n glVertex3f(right_lower_back[0], right_lower_back[1], right_lower_back[2])\n glVertex3f(right_upper_back[0], right_upper_back[1], right_upper_back[2])\n glVertex3f(right_upper_front[0], right_upper_front[1], right_upper_front[2])\n glVertex3f(right_lower_front[0], right_lower_front[1], right_lower_front[2])\n glEnd()\n\n\ndef drawPlatform():\n \"\"\"\n @function drawContainer\n Creates an a cuboid to represent the platform\n \"\"\"\n color = [0, 0, 0]\n side_color = [.1, .1, .1]\n opacity = 100.0\n centre = [0, 0, 0]\n width = hapticBallPlat[0]\n height = hapticBallPlat[1]\n depth = hapticBallPlat[2]\n\n points = dimensionsToCoordinates(centre, width, height, depth)\n\n rotated_points = points\n\n drawCuboid(color, opacity, rotated_points[0], rotated_points[1], rotated_points[2], rotated_points[3],\n rotated_points[4], rotated_points[5], rotated_points[6], rotated_points[7], side_color)\n\n\ndef dimensionsToCoordinates(centre, width, height, depth):\n \"\"\"\n @function dimensionsToCoordinates calculates the x, y and z values of a cuboid given the center, length width\n and height\n @param centre: centre coordinates of the cuboid\n @param width: width of 
cuboid\n @param height: height of cuboid\n @param depth: depth (front to back) of cuboid\n @return: returns a list of points\n \"\"\"\n left_lower_front = [(centre[0] - (width / 2)), (centre[1] - (height / 2)), (centre[2] + (depth / 2))]\n left_upper_front = [(centre[0] - (width / 2)), (centre[1] + (height / 2)), (centre[2] + (depth / 2))]\n right_lower_front = [(centre[0] + (width / 2)), (centre[1] - (height / 2)), (centre[2] + (depth / 2))]\n right_upper_front = [(centre[0] + (width / 2)), (centre[1] + (height / 2)), (centre[2] + (depth / 2))]\n left_lower_back = [(centre[0] - (width / 2)), (centre[1] - (height / 2)), (centre[2] - (depth / 2))]\n left_upper_back = [(centre[0] - (width / 2)), (centre[1] + (height / 2)), (centre[2] - (depth / 2))]\n right_lower_back = [(centre[0] + (width / 2)), (centre[1] - (height / 2)), (centre[2] - (depth / 2))]\n right_upper_back = [(centre[0] + (width / 2)), (centre[1] + (height / 2)), (centre[2] - (depth / 2))]\n\n points = [left_lower_front, left_upper_front, right_lower_front, right_upper_front, left_lower_back,\n left_upper_back, right_lower_back, right_upper_back]\n\n return points\n\ndef main():\n global window\n\n\n\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)\n glutInitWindowSize(1920, 1080)\n glutInitWindowPosition(200, 200)\n\n\n window = glutCreateWindow('OpenGL Python Cube')\n\n glutDisplayFunc(DrawGLScene)\n glutIdleFunc(DrawGLScene)\n glutKeyboardFunc(keyPressed)\n InitGL(1920, 1080)\n glutMainLoop()\n\nif __name__ == \"__main__\":\n app=wx.App(None)\n main()","sub_path":"rotations_testing.py","file_name":"rotations_testing.py","file_ext":"py","file_size_in_byte":9012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"581805447","text":"print(\"Case #1:\")\r\n\r\nsieve = [True] * (2 ** 16 + 1)\r\nsieve[0] = False\r\nsieve[1] = False\r\n\r\nfor i in range(2, len(sieve)):\r\n if not sieve[i]: continue\r\n sieve[2*i:len(sieve):i] = [False] * ((len(sieve)-1) // i - 1)\r\n\r\nprimes = [i for i, v in enumerate(sieve) if v]\r\n\r\nstart = 2 ** 31 + 1\r\nend = 2 ** 32 - 1\r\n\r\ncoins = []\r\nfor n in range(start, end+1, 2):\r\n s = bin(n)[2:]\r\n divisors = []\r\n for b in range(2, 10+1):\r\n n = int(s, b)\r\n res = next((p for p in primes if n % p == 0), None)\r\n if res:\r\n divisors.append(res)\r\n else:\r\n break\r\n if len(divisors) == 9:\r\n coins.append((s, divisors))\r\n if len(coins) == 500:\r\n break\r\n \r\nfor s, divisors in coins:\r\n print(s, ' '.join(map(str, divisors)))","sub_path":"codes/CodeJamCrawler/16_0_3/icedtrees/QualificationC.py","file_name":"QualificationC.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"68718482","text":"from itertools import combinations\r\nimport pandas as pd\r\n\r\ndef database(data, minsupport, minlen):\r\n ts=pd.get_dummies(data.unstack().dropna()).groupby(level=1).sum()\r\n collen, rowlen =ts.shape\r\n pattern = []\r\n for colnum in range(minlen, rowlen+1):\r\n for cols in combinations(ts, colnum):\r\n\r\n patsum = ts[list(cols)].all(axis=1).sum()\r\n pattern.append([\",\".join(cols), patsum])\r\n sdf = pd.DataFrame(pattern, columns=[\"Pattern\", \"Support\"])\r\n results=sdf[sdf.Support >= minsupport]\r\n freqset = results.reset_index(drop=True).values\r\n return freqset\r\n\r\ndef frequentItemSets(frequentSets):\r\n sets=[]\r\n for value,key in frequentSets:\r\n lens = str(value).split(',')\r\n freq=[]\r\n #print(lens)\r\n if len(lens)>1:\r\n for j in range(len(lens)):\r\n freq.append(\"\".join(list(lens[j])))\r\n sets.append(freq)\r\n sets.append(key)\r\n else:\r\n sets.append([\",\".join(lens)])\r\n sets.append(key)\r\n return sets\r\ndef defassociationRule(associationSets):\r\n associationRule = []\r\n for item in associationSets:\r\n if isinstance(item, list):\r\n if len(item) != 0:\r\n length = len(item) - 1\r\n while length > 0:\r\n combination = list(combinations(item, length))\r\n temp = []\r\n for Rkey in combination:\r\n Lkey = set(item) - set(Rkey)\r\n temp.append(list(Lkey))\r\n temp.append(list(Rkey))\r\n associationRule.append(temp)\r\n temp = []\r\n length = length - 1\r\n return associationRule\r\ndef confidenceSets(rules_for_association,data,minimumConfidence):\r\n Output = []\r\n noOfTransactions = len(data)\r\n for rule in rules_for_association:\r\n supportOfX = 0\r\n supportOfXinPercentage = 0\r\n supportOfXandY = 0\r\n supportOfXandYinPercentage = 0\r\n for transaction in data:\r\n\r\n if set(rule[0]).issubset(set(transaction)):\r\n supportOfX = supportOfX + 1\r\n if set(rule[0] + rule[1]).issubset(set(transaction)):\r\n supportOfXandY = supportOfXandY + 1\r\n supportOfXinPercentage = (supportOfX * 1.0 / noOfTransactions) * 100\r\n supportOfXandYinPercentage = (supportOfXandY * 1.0 / noOfTransactions) * 100\r\n confidence = (supportOfXandYinPercentage / supportOfXinPercentage) * 100\r\n if confidence >= minimumConfidence:\r\n supportOfXAppendString = str(rule[0])+\":\"+ str(round(supportOfXinPercentage, 2))\r\n supportOfXandYAppendString = str(rule[0])+\" & \"+str(rule[1])+\": \" + str(round(supportOfXandYinPercentage))\r\n confidenceAppendString = str(round(confidence))\r\n returnAprioriOutput=[]\r\n returnAprioriOutput.append(supportOfXAppendString)\r\n returnAprioriOutput.append(supportOfXandYAppendString)\r\n returnAprioriOutput.append(confidenceAppendString)\r\n returnAprioriOutput.append(str(rule[0])+\"-->\"+str(rule[1]))\r\n Output.append(returnAprioriOutput)\r\n return Output\r\n\r\nprint(\"Select from the following dataset: \")\r\nprint(\"1. Office Products\")\r\nprint(\"2. Groceries\")\r\nprint(\"3. Electronics\")\r\nprint(\"4. Clothes\")\r\nprint(\"5. 
Kitchen Utensils\")\r\nprint(\"\\n\")\r\nfileNameInput = input(\"Enter File Number: \")\r\nminSupport = input(\"Enter Minimum Support: \")\r\nminConf = input(\"Enter Minimum Confidence: \")\r\n\r\nif fileNameInput == '1':\r\n fileName = 'Office_depot.txt'\r\nif fileNameInput == '2':\r\n fileName = 'Walmart.txt'\r\nif fileNameInput == '3':\r\n fileName = 'Amazon.txt'\r\nif fileNameInput == '4':\r\n fileName = 'Asos.txt'\r\nif fileNameInput == '5':\r\n fileName = 'Target.txt'\r\nread_file = pd.read_csv(fileName, index_col=0)\r\ndataSet = read_file.values\r\nminSupport = (int(minSupport))*20/100\r\nminConf = int(minConf)\r\ntransform_dataset = database(read_file,minSupport, 1)\r\nfrequent = frequentItemSets(transform_dataset)\r\nrules_association = defassociationRule(frequent)\r\noutput = confidenceSets(rules_association,dataSet,minConf)\r\ncounter = 1\r\nif len(output) == 0:\r\n print(\"There are no association rules for this support and confidence.\")\r\nelse:\r\n pd.set_option('display.max_columns',None)\r\n df= pd.DataFrame(output,columns=[\"Support(X)\",\"Support(XUY)\", \"Confidence\", \"Rule\"])\r\n print(df)\r\n\r\n\r\n\r\n\r\n","sub_path":"Apriori.py","file_name":"Apriori.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"185577660","text":"import sys\nflags = [83, 99, 147, 163]\nwhile True:\n line = sys.stdin.readline()\n if line:\n fields = line.split('\\t')\n if int(fields[1]) not in flags:\n print(line),\n else:\n break\n \n\n\n\n","sub_path":"cc_mcc_seq/16sVirusesExome/1.filter.py","file_name":"1.filter.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"445655696","text":"\ndict_weapons = {\"Rezavý meč\": [10, 5], # name : price, attack\n \"Kvalitní meč\": [20, 10],\n \"Obouruční meč\": [50, 20]\n }\n \n\ndict_armors = {\"Kožená zbroj\": [10, 2], # name : price, defend\n \"Kroužková košile\": [20, 4],\n \"Lehké brnění\": [40, 8],\n \"Plátová zbroj\": [60, 12]\n }\n\n\ndict_artefacts = {\"Zuby Vucub Caqiche\" : [50, \"Vucub\"]} # name : price, ID \n\n\ndict_potions = {\"Malý lektvar života\": [10, 20], # name : price, hp\n \"Střední lektvar života\": [18, 40],\n \"Velký lektvar života\": [30, 80],\n }\n\n\nholly_weapons = {\"Zbíral\" : \"František\",\n \"Chalupa\" : \"Meč Mariána Weisse\",\n \"Papoušek\" : \"Okovaná Tóra\",\n \"Bělka\" : \"Sňéža\",\n \"Fujda\" : \"Posvátná kráva\"\n }\n\nshow_shop_dict = {\"weapons\" : dict_weapons,\n \"armors\" : dict_armors,\n \"artefacts\" : dict_artefacts,\n \"potions\" : dict_potions}\n\nshow_shop_dict_02 = {\"zbraně\" : \"weapons\",\n \"zbroje\" : \"armors\",\n \"artefakty\" : \"artefacts\",\n \"lektvary\" : \"potions\"}","sub_path":"reli_dict.py","file_name":"reli_dict.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"557453409","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom sign_data.views import signup,thankyou\nfrom rango.views import register,index,user_login,tweet,restricted,user_logout,messagetweet\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'mysite1.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^signup/$',signup),\n url(r'^thank_you/$',thankyou),\n url(r'^register/$',register, name='register'),\n url(r'^$',index),\n url(r'^login/$',user_login,name='login'),\n url(r'^tweet/$',tweet,name='tweet'),\n url(r'^restricted/',restricted, name='restricted'),\n url(r'^logout/$', user_logout, name='logout'),\n url(r'^messagetweet/$',messagetweet,name='messagetweet'),\n)\n","sub_path":"mysite1/mysite1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"161702530","text":"\n\n#calss header\nclass _EMERGE():\n\tdef __init__(self,): \n\t\tself.name = \"EMERGE\"\n\t\tself.definitions = [u'to appear by coming out of something or out from behind something: ', u'to come to the end of a difficult period or experience: ', u'to become known, especially as a result of examining something or asking questions about it: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_emerge.py","file_name":"_emerge.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"604080574","text":"import json\nimport os\nimport sys\n\n############################\n# Read and Write Functions #\n############################\n\n\n# Create config file if it does not exist\ndef initialize():\n exists = os.path.isfile('config.json')\n if not exists:\n config = {}\n config['bot'] = []\n config['bot'].append({\n 'token': '',\n 'command_prefix': '.',\n 'pay_day': None,\n 'timein_exclusions': [],\n 'timein_channel': None,\n 'message_limit': 500\n })\n\n config['users'] = []\n\n update(config)\n\n\n# Update config file\ndef update(config):\n with open(\"config.json\", \"w\") as json_file:\n json.dump(config, json_file, indent=4, sort_keys=True)\n\n\n# Add a user to the config file\ndef add_user(user):\n config = get_config()\n # User is already in the config\n if user_in_config(user):\n return\n else:\n config['users'].append({\n 'name': user.id,\n 'emote': 'yum',\n 'time_zone': None,\n 'pay_rate': None,\n 'used_identifiers': []\n })\n update(config)\n\n\ndef update_user(user, param, value):\n config = get_config()\n for p in config['users']:\n if p['name'] == user.id:\n config['users'][config['users'].index(p)][param] = value\n update(config)\n\n##################\n# Set Functions #\n##################\n\n\ndef set_timein_channel(channel):\n config = get_config()\n config['bot'][0]['timein_channel'] = channel.id\n update(config)\n\n\ndef set_pay_day(day):\n config = get_config()\n config['bot'][0]['pay_day'] = day\n update(config)\n\ndef set_message_limit(num):\n config = get_config()\n config['bot'][0]['message_limit'] = num\n update(config)\n\ndef add_indentifier(user, indentifier):\n indentifiers = get_user_val(user, 'used_identifiers')\n indentifiers.append(indentifier)\n update_user(user, 'used_identifiers', indentifiers)\n\n\ndef add_ti_exclusion(user):\n config = get_config()\n config['bot'][0]['timein_exclusions'].append(user.id)\n update(config)\n\n\ndef del_ti_exclusion(user):\n config = get_config()\n config['bot'][0]['timein_exclusions'].remove(user.id)\n update(config)\n\n####################\n# Access Functions #\n####################\n\n\ndef get_config():\n with open('config.json') as json_file:\n config = json.load(json_file)\n return config\n\n\ndef get_token():\n config = get_config()\n token = config['bot'][0]['token']\n # Exit bot if token is not defined\n if len(token)<=0:\n print(\"[Error] Please define a token in config.json\")\n sys.exit()\n else:\n return token\n\n\ndef get_user_val(user, param):\n config = get_config()\n for p in config['users']:\n if p['name'] == user.id:\n return p[param]\n\n\ndef get_command_prefix():\n config = get_config()\n return config['bot'][0]['command_prefix']\n\n\ndef get_timein_channel():\n config = get_config()\n return config['bot'][0]['timein_channel']\n\n\ndef get_timein_exclusions():\n config = get_config()\n return config['bot'][0]['timein_exclusions']\n\ndef get_payday():\n config = get_config()\n return config['bot'][0]['pay_day']\n\ndef get_message_limit():\n config = get_config()\n return config['bot'][0]['message_limit']\n\n####################\n# Helper Functions #\n####################\n\n\ndef user_in_config(user):\n config = get_config()\n for p in config['users']:\n if p['name'] == user.id:\n return True\n return False","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"507191661","text":"puladores= int(input(\"quantos puladores são? \"))\nwhile puladores>0:\n lista=[]\n nome= int(input(\"nome: \"))\n for aux in range (3):\n lista.append(float(input()))\n nmax= lista[0]\n for j in lista:\n if j>nmax:\n nmax=j\n \n puladores= puladores-1\n \nprint(lista)\n","sub_path":"Pythons/melhorsaltador.py","file_name":"melhorsaltador.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"282241997","text":"import csv\nimport numpy as np\nfrom imutils.video import WebcamVideoStream\nfrom imutils.video import FileVideoStream\nimport argparse\nimport time\nimport cv2\nimport os\nimport sys\nimport imutils\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport xml.etree.ElementTree as ET\nfrom xml.etree import ElementTree\nfrom xml.dom import minidom\nfrom pandas import DataFrame, read_csv\nimport matplotlib.pyplot as plt\nimport pandas as pd \nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\nfrom collections import namedtuple\n#----------------------------------------------------------------------\n#Function for write the CSV file\ndef csv_writer(data, path):\n \"\"\"\n Write data to a CSV file path\n \"\"\"\n with open(path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)\n\n#Function for count the nu,ber of files in the folder\ndef filecount(dir_name):\n\treturn len([f for f in os.listdir(dir_name) if os.path.isfile(f)])\n\n#----------------------------------------------------------------------\nRectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True,\n\thelp=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True,\n\thelp=\"path to Caffe pre-trained model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nargs = vars(ap.parse_args())\n\n# initialize the list of class labels MobileNet SSD was trained to\n# detect, then generate a set of bounding box colors for each class\nCLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n\t\"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n\t\"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n\t\"sofa\", \"train\", \"tvmonitor\"]\nCOLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\n# load our serialized model from disk\nprint(\"Loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\n\nvs = cv2.VideoCapture(\"./frame%d.jpg\")\n\nfgbg = cv2.createBackgroundSubtractorMOG2()\nfgbg.setBackgroundRatio(0.01)\n#fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()\n\n\nwhile(1):\n\tsquares = []\n\tret, frame = vs.read() #ret = indica se houve uma capura, frame = frma do video\n\t#frame = imutils.resize(frame, width=320)\n\ttraited_frame = frame\n\t#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tfgmask = fgbg.apply(traited_frame)\n\tthresh = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)[1]\n\tthresh = cv2.erode(thresh,None,iterations = 1)\n\tthresh = cv2.dilate(thresh, None, iterations=1)\n\t(test, contour, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\tfor c in contour:\n\t\tif cv2.contourArea(c) > 700:\n\t\t\t(x, y, w, h) = cv2.boundingRect(c)\n\t\t\trects = Rectangle(x, y, w, h)\n\t\t\tsquares.append(rects)\n\t\t\t#cv2.rectangle(traited_frame, (x, y), (x+w, y+h), 255, 2)\n\t\n\tif len(squares) > 0:\n\n\n\t\txfinal = 999\n\t\tyfinal = 999\n\t\twfinal = 0\n\t\thfinal = 0\t\t\t\t\t\t\t\t\t\n\n\t\tfor i in range(0, len(squares)):\n\n\t\t\tx, y, w, h = squares[i]\n\t\t\tw = w + x\n\t\t\th = h + y\n\t\t\t# print(x, y, w, h)\n\t\t\tif xfinal > x:\n\t\t\t\txfinal = x\n\t\t\tif yfinal > y:\n\t\t\t\tyfinal = y\n\t\t\tif wfinal 
< w:\n\t\t\t\twfinal = w\n\t\t\tif hfinal < h:\n\t\t\t\thfinal = h\n\t\t\t\n\t\t#cv2.rectangle(traited_frame, (xfinal, yfinal), (wfinal, hfinal), (0, 0, 255), 2)\n\t\tuseful_image = frame[yfinal:hfinal, xfinal:wfinal]\n\t\t#cv2.imshow(\"DETCTION\", useful_image)\n\t\t(htest, wtest) = useful_image.shape[:2]\n\t\tblob = cv2.dnn.blobFromImage(useful_image,\t0.007843, (300, 300), 127.5) \n\t\tnet.setInput(blob)\n\t\tdetections = net.forward()\n\t\tfor i in np.arange(0, detections.shape[2]):\n\t\t\t# extract the confidence (i.e., probability) associated with the prediction\n\t\t\tconfidence = detections[0, 0, i, 2]\n\t\t\tif confidence > args[\"confidence\"]:\n\t\t\t\t# extract the index of the class label from the `detections`, then compute the (x, y)-coordinates of the bounding box for the object\n\t\t\t\tidx = int(detections[0, 0, i, 1])\n\t\t\t\tif idx == 15:\n\t\t\t\t\tbox = detections[0, 0, i, 3:7] * np.array([wtest, htest, wtest, htest])\n\t\t\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\t\t\t\t\t\n\t\t\t\t\tstartX = startX + xfinal\n\t\t\t\t\tstartY = startY + yfinal\n\t\t\t\t\t#print(startX, startY, endX, endY)\n\t\t\t\t\tendX = endX + xfinal\n\t\t\t\t\tendY = endY + yfinal\n \t\t# draw the prediction on the frame\n\t\t\t\t\tlabel = \"{}: {:.2f}%\".format(CLASSES[idx], confidence * 100)\n\t\t\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)\n\t\t\t\t\ty = startY - 15 if startY - 15 > 15 else startY + 15\n\t\t\t\t\tcv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n\t\t\t\t\tcv2.imshow(\"final\", frame)\n\n\telse:\n\t\t\n\t\tcv2.destroyAllWindows()\n\t#input(\"Press enter to continue\")\n\n\n\tkey = cv2.waitKey(1) & 0xFF \n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n","sub_path":"2_Perfomance_test_study/Test_complet/Comparison_bureau/Background_supression.py","file_name":"Background_supression.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"164439885","text":"n = int(input())\ntable = []\nsum = 0\ncount = 0\nsame = 0\ncheck = [False] * n\nfor i in range(n):\n a, b = map(int, input().split())\n table.append([a/b, -b/a])\n\nfor i in range(n):\n tmp = count\n for j in range(i+1, n):\n if table[i][0] == table[j][1]:\n count += 1\n if check[i]:\n check[i] += 1\n else:\n check[i] = 1\n if check[j]:\n check[j] += 1\n else:\n check[j] = 1\n # sum -= 2**(n-2)\n # print(sum)\nfor i in range(n):\n if check[i] >= 2:\n sum += 2**(n-3) * (check[i] - 1)\n same += check[i] - 1\n\nsum += 2**n - 1\nsum -= 2**(n-2) * count\nsum += 2**(n-4) * (count - same)\n\n# print(table)\nprint(check)\nprint(same)\nprint(sum)\n# print(count)\n","sub_path":"ABC/168/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"132644196","text":"import subprocess\nimport sys\nimport setup_util\nimport os\n\ndef start(args, logfile, errfile):\n setup_util.replace_text(\"php-kohana/application/config/database.php\", \"localhost\", \"\"+ args.database_host +\"\")\n setup_util.replace_text(\"php-kohana/deploy/nginx.conf\", \"root .*\\/FrameworkBenchmarks/php-kohana\", \"root \" + args.troot)\n\n try:\n if os.name == 'nt':\n subprocess.check_call('icacls \"C:\\\\FrameworkBenchmarks\\\\php-kohana\" /grant \"IIS_IUSRS:(OI)(CI)F\"', shell=True, stderr=errfile, stdout=logfile)\n subprocess.check_call('appcmd add site /name:PHP /bindings:http/*:8080: /physicalPath:\"C:\\\\FrameworkBenchmarks\\\\php-kohana\"', shell=True, stderr=errfile, stdout=logfile)\n return 0\n subprocess.check_call(\"sudo chown -R www-data:www-data php-kohana\", shell=True, stderr=errfile, stdout=logfile)\n subprocess.check_call(\"sudo $PHP_FPM --fpm-config $FWROOT/config/php-fpm.conf -g $TROOT/deploy/php-fpm.pid\", shell=True, stderr=errfile, stdout=logfile)\n subprocess.check_call(\"sudo /usr/local/nginx/sbin/nginx -c $TROOT/deploy/nginx.conf\", shell=True, stderr=errfile, stdout=logfile)\n return 0\n except subprocess.CalledProcessError:\n return 1\ndef stop(logfile, errfile):\n try:\n if os.name == 'nt':\n subprocess.check_call('appcmd delete site PHP', shell=True, stderr=errfile, stdout=logfile)\n return 0\n subprocess.call(\"sudo /usr/local/nginx/sbin/nginx -s stop\", shell=True, stderr=errfile, stdout=logfile)\n subprocess.call(\"sudo kill -QUIT $( cat $TROOT/deploy/php-fpm.pid )\", shell=True, stderr=errfile, stdout=logfile)\n subprocess.check_call(\"sudo chown -R $USER:$USER php-kohana\", shell=True, stderr=errfile, stdout=logfile)\n return 0\n except subprocess.CalledProcessError:\n return 1","sub_path":"frameworks/PHP/php-kohana/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"454644384","text":"##############################################################################\n#\n# Copyright (c) 2010 ViFiB SARL and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\n#XXX factor with slapos/grid/networkcache.py and use libnetworkcache helpers\n\nfrom __future__ import absolute_import, print_function, division\nimport hashlib\nimport posixpath\nimport re\nimport traceback\ntry:\n # Python 3\n from urllib.error import HTTPError\n from urllib.parse import urlparse\n strify = bytes.decode\nexcept ImportError:\n # Python 2\n from urllib2 import HTTPError\n from urlparse import urlparse\n strify = str\n\ntry:\n try:\n from slapos.libnetworkcache import NetworkcacheClient, UploadError, \\\n DirectoryNotFound\n from slapos.networkcachehelper import \\\n helper_download_network_cached, \\\n helper_download_network_cached_to_file\n except ImportError:\n LIBNETWORKCACHE_ENABLED = False\n else:\n LIBNETWORKCACHE_ENABLED = True\nexcept:\n print('There was problem while trying to import slapos.libnetworkcache:\\n'\n + traceback.format_exc())\n LIBNETWORKCACHE_ENABLED = False\n print('Networkcache forced to be disabled.')\n\n_md5_re = re.compile(r'md5=([a-f0-9]+)')\n\n\ndef _get_md5_from_url(url):\n match = _md5_re.search(url)\n if match:\n return match.group(1)\n\ndef fallback_call(function):\n \"\"\"Decorator which disallow to have any problem while calling method\"\"\"\n def wrapper(self, *args, **kwd):\n \"\"\"\n Log the call, and the result of the call\n \"\"\"\n try:\n return function(self, *args, **kwd)\n except: # indeed, *any* exception is swallowed\n print('There was problem while calling method %r:\\n%s' % (\n function.__name__, traceback.format_exc()))\n return False\n wrapper.__doc__ = function.__doc__\n return wrapper\n\n\n@fallback_call\ndef get_directory_key(url):\n \"\"\"Returns directory hash based on url.\n\n Basically check if the url belongs to pypi:\n - if yes, the directory key will be pypi-buildout-urlmd5\n - if not, the directory key will be slapos-buildout-urlmd5\n # XXX why is that?\n \"\"\"\n urlmd5 = hashlib.md5(url.encode()).hexdigest()\n if 'pypi' in url:\n return 'pypi-buildout-%s' % urlmd5\n return 'slapos-buildout-%s' % urlmd5\n\n@fallback_call\ndef get_index_directory_key(url, requirement):\n \"\"\"Returns directory hash based on egg requirement.\n \"\"\"\n urlmd5 = hashlib.md5(url.encode()).hexdigest()\n return 'pypi-index-%s-%s' % (urlmd5, requirement)\n\n\n@fallback_call\ndef download_network_cached(dir_url, cache_url, path, url, logger,\n signature_certificate_list, md5sum=None):\n \"\"\"Downloads from a network cache provider\n\n If something fail (providor be offline, or hash_string fail), we ignore\n network cached files.\n\n return True if download succeeded.\n \"\"\"\n if not LIBNETWORKCACHE_ENABLED:\n return False\n\n if md5sum is None:\n md5sum = _get_md5_from_url(url)\n\n directory_key = get_directory_key(url)\n\n logger.debug('Trying to download %s from network cache...', url)\n\n if helper_download_network_cached_to_file(\n dir_url=dir_url,\n 
cache_url=cache_url,\n signature_certificate_list=signature_certificate_list,\n directory_key=directory_key,\n path=path):\n logger.info('Downloaded %s from network cache.', url)\n\n if check_md5sum(path, md5sum):\n return True\n logger.info('MD5 checksum mismatch downloading %s', url)\n else:\n logger.info('Cannot download %s from network cache.', url)\n return False\n\n@fallback_call\ndef download_index_network_cached(dir_url, cache_url, url, requirement, logger,\n signature_certificate_list):\n \"\"\"\n XXX description\n Downloads pypi index from a network cache provider\n\n If something fail (providor be offline, or hash_string fail), we ignore\n network cached index.\n\n return index if succeeded, False otherwise.\n \"\"\"\n if not LIBNETWORKCACHE_ENABLED:\n return False\n\n directory_key = get_index_directory_key(url, requirement)\n\n wanted_metadata_dict = {\n 'urlmd5': hashlib.md5(url.encode()).hexdigest(),\n 'requirement': requirement,\n }\n required_key_list = ['base']\n\n result = helper_download_network_cached(dir_url, cache_url,\n signature_certificate_list,\n directory_key, wanted_metadata_dict, required_key_list)\n if result:\n file_descriptor, metadata = result\n try:\n content = strify(file_descriptor.read())\n logger.info('Downloaded %s from network cache.', url)\n return content, metadata['base']\n except (IOError, DirectoryNotFound) as e:\n if isinstance(e, HTTPError) and e.code == 404:\n logger.debug('%s does not exist in network cache.', url)\n else:\n logger.debug('Failed to download from network cache %s: %s',\n url, e)\n return False\n\n@fallback_call\ndef upload_network_cached(dir_url, cache_url, external_url, path, logger,\n signature_private_key_file, shacache_ca_file, shacache_cert_file,\n shacache_key_file, shadir_ca_file, shadir_cert_file, shadir_key_file):\n \"\"\"Upload file to a network cache server\"\"\"\n # XXX use helper and FACTOR code\n if not LIBNETWORKCACHE_ENABLED:\n return False\n\n if not (dir_url and cache_url):\n return False\n\n logger.info('Uploading %s into network cache.', external_url)\n\n file_name = get_filename_from_url(external_url)\n\n directory_key = get_directory_key(external_url)\n kw = dict(file_name=file_name,\n urlmd5=hashlib.md5(external_url.encode()).hexdigest())\n\n # convert '' into None in order to call nc nicely\n if not signature_private_key_file:\n signature_private_key_file = None\n if not shacache_ca_file:\n shacache_ca_file = None\n if not shacache_cert_file:\n shacache_cert_file = None\n if not shacache_key_file:\n shacache_key_file = None\n if not shadir_ca_file:\n shadir_ca_file = None\n if not shadir_cert_file:\n shadir_cert_file = None\n if not shadir_key_file:\n shadir_key_file = None\n try:\n nc = NetworkcacheClient(cache_url, dir_url,\n signature_private_key_file=signature_private_key_file,\n shacache_ca_file=shacache_ca_file,\n shacache_cert_file=shacache_cert_file,\n shacache_key_file=shacache_key_file,\n shadir_ca_file=shadir_ca_file,\n shadir_cert_file=shadir_cert_file,\n shadir_key_file=shadir_key_file)\n except TypeError:\n logger.warning('Incompatible version of networkcache, not using it.')\n return False\n\n try:\n with open(path, 'rb') as f:\n return nc.upload(f, directory_key, **kw)\n except (IOError, UploadError) as e:\n logger.info('Fail to upload file. 
%s', e)\n    return False\n\n@fallback_call\ndef upload_index_network_cached(dir_url, cache_url, external_url, base, requirement, content, logger,\n    signature_private_key_file, shacache_ca_file, shacache_cert_file,\n    shacache_key_file, shadir_ca_file, shadir_cert_file, shadir_key_file):\n  # XXX use helper and FACTOR code\n  \"\"\"Upload content of a web page to a network cache server\"\"\"\n  if not LIBNETWORKCACHE_ENABLED:\n    return False\n\n  if not (dir_url and cache_url):\n    return False\n\n  logger.info('Uploading %s content into network cache.', external_url)\n\n  directory_key = get_index_directory_key(external_url, requirement)\n  kw = dict(file=\"file\",\n            base=base,\n            urlmd5=hashlib.md5(external_url.encode()).hexdigest(),\n            requirement=requirement)\n\n  import tempfile\n  f = tempfile.TemporaryFile()\n  if not isinstance(content, bytes):\n    # TemporaryFile is opened in binary mode, so encode text content first\n    content = content.encode()\n  f.write(content)\n\n  # convert '' into None in order to call nc nicely\n  if not signature_private_key_file:\n    signature_private_key_file = None\n  if not shacache_ca_file:\n    shacache_ca_file = None\n  if not shacache_cert_file:\n    shacache_cert_file = None\n  if not shacache_key_file:\n    shacache_key_file = None\n  if not shadir_ca_file:\n    shadir_ca_file = None\n  if not shadir_cert_file:\n    shadir_cert_file = None\n  if not shadir_key_file:\n    shadir_key_file = None\n  try:\n    nc = NetworkcacheClient(cache_url, dir_url,\n        signature_private_key_file=signature_private_key_file,\n        shacache_ca_file=shacache_ca_file,\n        shacache_cert_file=shacache_cert_file,\n        shacache_key_file=shacache_key_file,\n        shadir_ca_file=shadir_ca_file,\n        shadir_cert_file=shadir_cert_file,\n        shadir_key_file=shadir_key_file)\n  except TypeError:\n    logger.warning('Incompatible version of networkcache, not using it.')\n    return False\n\n  try:\n    return nc.upload_generic(f, directory_key, **kw)\n  except (IOError, UploadError) as e:\n    logger.info('Fail to upload file. %s', e)\n    return False\n  finally:\n    f.close()\n\n\n@fallback_call\ndef get_filename_from_url(url):\n  \"\"\"Inspired by how pip gets the filename from a url.\n  \"\"\"\n  parsed_url = urlparse(url)\n  if parsed_url.query and parsed_url.path.endswith('/'):\n    name = parsed_url.query.split('?', 1)[0]\n  elif parsed_url.path.endswith('/') and not parsed_url.query:\n    name = parsed_url.path.split('/')[-2]\n  else:\n    name = posixpath.basename(parsed_url.path)\n\n  name = name.split('#', 1)[0]\n  assert name, (\n    'URL %r produced no filename' % url)\n  return name\n\n\nfrom .download import check_md5sum\n","sub_path":"src/zc/buildout/networkcache.py","file_name":"networkcache.py","file_ext":"py","file_size_in_byte":10137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"589952057","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.profile, name='profile'),\n path('admin/', views.admin, name='admin'),\n path(\n 'delete_attendance//',\n views.delete_attendance,\n name='delete_attendance'),\n path(\n 'order_history/',\n views.order_history,\n name='order_history'),\n]\n","sub_path":"profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"353647869","text":"\"\"\"dicefile=open(\"Dice Rolling.txt\")\r\ndicefile.readline()\r\nprob=dicefile.readlines()\r\nfor element in prob:\r\n print(int(float(element)*6+1),end=\" \")\r\n \r\n \"\"\"\"\"\r\ndeneme=[1,2,3,4]\r\ndeneme.insert(0,11)\r\ndeneme.__delitem__(3)\r\nprint(deneme)","sub_path":"Dice Rolling.py","file_name":"Dice Rolling.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"148549407","text":"import logging\nimport operator\nimport textwrap\nimport json\nimport shutil\nimport collections\nimport click\nfrom prettytable import PrettyTable, ALL # type: ignore\nfrom slugify import slugify\nfrom fuzzywuzzy import fuzz # type: ignore\nfrom colorama import Fore # type: ignore\nfrom click_skeleton import AdvancedGroup\nfrom musicbot.cli.options import output_option\nfrom musicbot.cli.spotify import spotify_options\nfrom musicbot.cli.user import user_options\nfrom musicbot.cli.music_filter import music_filter_options\nfrom musicbot.music.file import STOPWORDS, REPLACEMENTS\n\nlogger = logging.getLogger(__name__)\n\n\ndef dump_tracks(tracks):\n tracks = [\n {\n 'title': t['track']['name'],\n 'artist': t['track']['artists'][0]['name'],\n 'album': t['track']['album']['name'],\n } for t in tracks\n ]\n print(json.dumps(tracks))\n\n\ndef print_tracks_table(tracks):\n if not tracks:\n return\n pt = PrettyTable([\"Track\", \"Artist\", \"Album\"])\n pt.align = 'l'\n width = shutil.get_terminal_size().columns // 3\n for t in tracks:\n title = '\\n'.join(textwrap.wrap(t['track']['name'], width))\n artist = '\\n'.join(textwrap.wrap(t['track']['artists'][0]['name'], width))\n album = '\\n'.join(textwrap.wrap(t['track']['album']['name'], width))\n pt.add_row([title, artist, album])\n print(pt)\n\n\ndef print_distances(distances):\n if not distances:\n return\n pt = PrettyTable([\"Title\", \"Artist\", \"Album\", \"Distance\"])\n pt.align = 'l'\n pt.hrules = ALL\n for distance in distances:\n st = distance['spotify_track']\n stitle = st['track']['name']\n sartist = st['track']['artists'][0]['name']\n salbum = st['track']['album']['name']\n dtitle = distance['local_track']['title']\n dartist = distance['local_track']['artist']\n dalbum = distance['local_track']['album']\n identical = True\n\n if stitle != dtitle:\n final_title = f\"{Fore.YELLOW}{stitle} (spotify){Fore.RESET}\\n{Fore.CYAN}{dtitle} (local){Fore.RESET}\"\n identical = False\n else:\n final_title = f\"{Fore.GREEN}{stitle}{Fore.RESET}\"\n\n if sartist != dartist:\n final_artist = f\"{Fore.YELLOW}{sartist} (spotify){Fore.RESET}\\n{Fore.CYAN}{dartist} (local){Fore.RESET}\"\n identical = False\n else:\n final_artist = f\"{Fore.GREEN}{sartist}{Fore.RESET}\"\n\n if salbum != dalbum:\n final_album = f\"{Fore.YELLOW}{salbum} (spotify){Fore.RESET}\\n{Fore.CYAN}{dalbum} (local){Fore.RESET}\"\n identical = False\n else:\n final_album = f\"{Fore.GREEN}{salbum}{Fore.RESET}\"\n\n if identical:\n continue\n\n d = distance['distance']\n pt.add_row([\n final_title,\n final_artist,\n final_album,\n d,\n ])\n print(pt)\n\n\ndef print_playlists_table(playlists):\n if not playlists:\n return\n pt = PrettyTable([\"Name\", \"Size\"])\n for p in playlists:\n pt.add_row([p['name'], p['tracks']['total']])\n print(pt.get_string(title='Spotify playlists'))\n\n\ndef output_tracks(output: str, tracks):\n if output == 'table':\n print_tracks_table(tracks)\n elif output == 'json':\n dump_tracks(tracks)\n\n\n@click.group('spotify', help='Spotify tool', cls=AdvancedGroup)\ndef cli():\n pass\n\n\n@cli.command(help='Generate a new token', aliases=['auth'])\n@spotify_options\ndef new_token(spotify):\n print(spotify.new_token())\n\n\n@cli.command(help='Token informations')\n@spotify_options\ndef cached_token(spotify):\n print(spotify.cached_token())\n print(f\"Expired : {spotify.is_token_expired()}\")\n\n\n@cli.command(help='Get a new token')\n@spotify_options\ndef refresh_token(spotify):\n 
print(spotify.refresh_token())\n\n\n@cli.command(help='List playlists')\n@spotify_options\ndef playlists(spotify):\n print_playlists_table(spotify.playlists())\n\n\n@cli.command(help='Show playlist')\n@spotify_options\n@output_option\n@click.argument(\"name\")\ndef playlist(name, spotify, output):\n tracks = spotify.playlist_tracks(name)\n output_tracks(output, tracks)\n\n\n@cli.command(help='Show tracks')\n@spotify_options\n@output_option\ndef tracks(spotify, output):\n tracks = spotify.tracks()\n output_tracks(output, tracks)\n\n\n@cli.command(help='Diff between local and spotify')\n@user_options\n@spotify_options\n@music_filter_options\n@output_option\n@click.option('--download-playlist', help='Create the download playlist', is_flag=True)\n@click.option('--min-threshold', help='Minimum distance threshold', type=click.FloatRange(0, 100), default=90)\n@click.option('--max-threshold', help='Maximum distance threshold', type=click.FloatRange(0, 100), default=100)\ndef diff(user, download_playlist, music_filter, spotify, output, min_threshold, max_threshold):\n spotify_tracks = spotify.tracks()\n spotify_tracks_by_slug = {\n # slugify(f\"\"\"{t['track']['artists'][0]['name']}-{t['track']['album']['name']}-{t['track']['name']}\"\"\", stopwords=STOPWORDS, replacements=REPLACEMENTS): # type: ignore\n slugify(f\"\"\"{t['track']['artists'][0]['name']}-{t['track']['name']}\"\"\", stopwords=STOPWORDS, replacements=REPLACEMENTS): # type: ignore\n t for t in spotify_tracks\n }\n\n local_tracks = user.do_filter(music_filter)\n local_tracks_by_slug = {\n # slugify(f\"\"\"{t['artist']}-{t['album']}-{t['title']}\"\"\", stopwords=STOPWORDS, replacements=REPLACEMENTS): # type: ignore\n slugify(f\"\"\"{t['artist']}-{t['title']}\"\"\", stopwords=STOPWORDS, replacements=REPLACEMENTS): # type: ignore\n t for t in local_tracks\n }\n\n spotify_differences = set(spotify_tracks_by_slug.keys()).difference(set(local_tracks_by_slug.keys()))\n spotify_slug_tracks = collections.OrderedDict((d, spotify_tracks_by_slug[d]) for d in sorted(spotify_differences))\n\n local_tracks_found = len(spotify_tracks_by_slug) - len(spotify_differences)\n if len(local_tracks) == local_tracks_found:\n return\n\n if download_playlist:\n spotify.set_download_playlist(spotify_slug_tracks.values())\n\n output_tracks(output, spotify_slug_tracks.values())\n distances_tracks = []\n for spotify_slug, spotify_track in spotify_slug_tracks.items():\n distances = {\n local_slug: fuzz.ratio(spotify_slug, local_slug)\n for local_slug in local_tracks_by_slug\n }\n if not distances:\n continue\n closest_local_track = max(distances.items(), key=operator.itemgetter(1))\n closest_local_slug = closest_local_track[0]\n closest_distance = closest_local_track[1]\n\n if min_threshold <= closest_distance <= max_threshold:\n if 'spotify-error' in local_tracks_by_slug[closest_local_slug]['keywords']:\n continue\n distances_tracks.append({\n 'local_track': local_tracks_by_slug[closest_local_slug],\n 'local_slug': closest_local_slug,\n 'spotify_track': spotify_track,\n 'spotify_slug': spotify_slug,\n 'distance': closest_distance,\n })\n print_distances(distances_tracks)\n print(f\"spotify tracks : {len(spotify_tracks)}\")\n print(f\"spotify slugs: {len(spotify_tracks_by_slug)}\")\n print(f\"local tracks : {len(local_tracks)}\")\n print(f\"local tracks slugs : {len(local_tracks_by_slug)}\")\n print(f\"found in local : {local_tracks_found}\")\n print(f\"not found in local : 
{len(spotify_differences)}\")\n","sub_path":"musicbot/commands/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":7488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"247432398","text":"\"\"\"\nBACK STEP FUNCTION\nPerform one layer of back propagation\ninput: A_prev -- an np.array of activations from the previous layer, shape (size of previous layer, 1)\n cache -- tuple containing A_prev, W, b, Z from current layer from forward prop\n activiation -- a string calling either 'sigmoid', 'relu', or 'softmax' activation\n\noutput: dA_prev -- gradient of the cost with respect to activation of previous layer\n dW -- gradient of cost with respect to W in current layer\n db -- gradient of cost with respect to b in current layer\n\n@author: David A. Nash\n\"\"\"\nimport numpy as np\nfrom NNutils import sigmoid, relu, softmax\n\ndef backStep(dA, cache, activation):\n \n A_prev, W, b, Z = cache ##recall values from the current layer\n m = A_prev.shape[1]\n \n ##first take derivatives of activation functions\n if activation == 'relu':\n dZ = np.array(dA, copy=True)\n dZ[Z<=0] = 0\n elif activation == 'sigmoid':\n s = sigmoid(Z)\n dZ = dA*s*(1-s)\n elif activation == 'softmax':\n dZ=dA ##if we are in the final layer with softmax, dA input will be exactly dZ\n elif activation == 'leaky':\n dZ = np.array(dA, copy=True)\n dZ[Z<=0] *= 0.1\n \n \n ##then take linear part of the derivative\n dW = 1/m*np.dot(dZ, A_prev.T)\n db = 1/m*np.sum(dZ, axis=1, keepdims=True)\n dA_prev = np.dot(W.T,dZ)\n \n return dA_prev, dW, db\n","sub_path":"One-Hot-Style/backStep.py","file_name":"backStep.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"107673580","text":"import pygame\r\nimport os\r\nimport time\r\nimport win32ui\r\nimport win32gui\r\nimport win32con\r\nimport win32api\r\nfrom icon_save import get_icon_save\r\nfrom scrollbar import scrollbar\r\n\r\nclass exp():\r\n def __init__(self, w, h):\r\n pygame.init()\r\n pygame.display.set_caption('PulExp')\r\n self.flags_d = pygame.RESIZABLE\r\n self.display = pygame.display.set_mode((w, h), self.flags_d)\r\n self.clock = pygame.time.Clock()\r\n self.font = pygame.font.SysFont('Monofur', 16)\r\n self.err_font = pygame.font.SysFont('Consolas', 48)\r\n self.nav_img = []\r\n self.nav_img.append(pygame.image.load(\"backward.png\"))\r\n self.nav_img.append(pygame.image.load(\"forward.png\"))\r\n self.nav_img.append(pygame.image.load(\"up.png\"))\r\n self.scroll_mouse = False\r\n self.is_run = True\r\n \r\n self.nav_x = 0\r\n self.nav_y = 0\r\n self.nav_w = w\r\n self.nav_h = 64\r\n self.exp_x = 0\r\n self.exp_y = 64\r\n self.exp_w = w\r\n self.exp_h = h-64\r\n self.exp_len = self.exp_h / 24\r\n self.exp_b_history = []\r\n self.exp_f_history = []\r\n \r\n self.evt_QUIT = []\r\n self.evt_MOUSEBUTTONDOWN = []\r\n self.evt_MOUSEMOTION = []\r\n self.evt_VIDEORESIZE = []\r\n \r\n self.icons = {}\r\n for i in os.listdir(\"ico\"):\r\n f, ext = os.path.splitext(i)\r\n if f != \"\" and ext != \"\" and f != \".\" and ext != \".\":\r\n self.icons[\".\" + f] = pygame.image.load(\"ico/\" + i)\r\n self.icons[\"folder\"] = pygame.image.load(\"folder\" + \".bmp\")\r\n self.m_llf = time.time()\r\n self.path = \"C:/\"\r\n self.list_cons(\"C:/\")\r\n self.draw()\r\n self.default_evts()\r\n self.loop()\r\n \r\n def loop(self):\r\n while self.is_run:\r\n self.clock.tick(60)\r\n pygame.event.pump()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n for func in self.evt_QUIT:\r\n func(event)\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n for func in self.evt_MOUSEBUTTONDOWN:\r\n func(event)\r\n if event.type == pygame.MOUSEMOTION:\r\n for func in self.evt_MOUSEMOTION:\r\n func(event)\r\n if event.type == pygame.VIDEORESIZE:\r\n for func in self.evt_VIDEORESIZE:\r\n func(event)\r\n \r\n def list_cons(self, path, navcontrol=False):\r\n oldpath = self.path\r\n if self.lisdir(path):\r\n self.exp_b_history.append(oldpath)\r\n if not navcontrol:\r\n if self.exp_f_history != []:\r\n self.exp_f_history = []\r\n if len(self.lsdir) > self.exp_len:\r\n self.scrol = scrollbar(self.exp_x, self.exp_y, self.exp_w, self.exp_h, 24*(len(self.lsdir)+1))\r\n for i in self.lsdir:\r\n f, ext = os.path.splitext(i)\r\n if f != \"\" and ext != \"\" and f != \".\" and ext != \".\":\r\n if not os.path.isdir(i):\r\n if ext not in self.icons and not os.path.isdir(self.path + i):\r\n self.icons[ext] = pygame.image.load(get_icon_save(self.path + i))\r\n self.draw()\r\n \r\n def draw(self):\r\n self.display.fill((32,32,32))\r\n self.nav()\r\n for idx, i in enumerate(self.lsdir[self.scrl:int(self.exp_len+self.scrl)+1]):\r\n f ,ext =os.path.splitext(i)\r\n if f != \"\" and ext != \"\" and f != \".\" and ext != \".\" and not os.path.isdir(self.path + i):\r\n self.display.blit(self.icons[ext],(self.exp_x+12,idx*24+self.exp_y+4))\r\n else:\r\n self.display.blit(self.icons[\"folder\"],(self.exp_x+12,idx*24+self.exp_y+4))\r\n \r\n date = self.font.render(time.ctime(os.path.getmtime(self.path + i))[8:], False, (128, 128, 128))\r\n self.display.blit(date,(self.exp_x+320,idx*24+self.exp_y+2))\r\n \r\n if len(i)-30 > 0:\r\n i = i[:27] + \"...\"\r\n name = self.font.render(i, False, (128, 128, 128))\r\n 
self.display.blit(name,(self.exp_x+48,idx*24+self.exp_y+2))\r\n pygame.draw.line(self.display, (64,64,64), [self.exp_x, (idx+1)*24+self.exp_y], [self.exp_w, (idx+1)*24+self.exp_y], 1)\r\n \r\n if len(self.lsdir) > self.exp_len:\r\n pygame.draw.line(self.display, (64,64,64), (self.scrol.scroll_out_s_x, self.scrol.scroll_out_s_y), (self.scrol.scroll_out_s_x2, self.scrol.scroll_out_s_y2), 9)\r\n pygame.draw.line(self.display, (128,128,128), (self.scrol.scroll_in_x, self.scrol.scroll_in_y), (self.scrol.scroll_in_x2, self.scrol.scroll_in_y2), 9)\r\n pygame.draw.line(self.display, (64,64,64), (self.scrol.scroll_out_e_x, self.scrol.scroll_out_e_y), (self.scrol.scroll_out_e_x2, self.scrol.scroll_out_e_y2), 9)\r\n \r\n pygame.display.update()\r\n \r\n def nav(self):\r\n self.display.blit(self.nav_img[0],(self.nav_x+12,self.nav_y+12))\r\n self.display.blit(self.nav_img[1],(self.nav_x+64,self.nav_y+12))\r\n self.display.blit(self.nav_img[2],(self.nav_x+100,self.nav_y+8))\r\n \r\n pygame.draw.line(self.display, (92,92,92), [self.nav_x, self.nav_h-2], [self.nav_w, self.nav_h-2], 2)\r\n self.spltpath = self.path.split(\"/\", )\r\n self.spltpath.pop()\r\n self.spltpath_coord = []\r\n self.nav_bar_pathlist = []\r\n lenght = 0\r\n for idx, pth in enumerate(self.spltpath):\r\n nav_bar_path = \"\"\r\n for z in self.spltpath[:idx+1]:\r\n nav_bar_path += z + \"/\"\r\n self.nav_bar_pathlist.append(nav_bar_path)\r\n \r\n lenghtold = lenght\r\n lenght += len(pth)*8.3\r\n space = idx*14\r\n height = 18\r\n margin_top = 40\r\n margin_left = 150\r\n padding_top = 1\r\n padding_left = 5\r\n padding_right = 6\r\n self.display.blit(self.font.render(pth, False, (128, 128, 128)),(self.nav_x+margin_left+lenghtold+space,self.nav_y+margin_top))\r\n self.spltpath_coord.append(((self.nav_x+margin_left+lenghtold-padding_left+space, self.nav_y+margin_top-padding_top),(self.nav_x+margin_left+lenght+padding_right+space,self.nav_y+margin_top+height)))\r\n pygame.draw.line(self.display, (92,92,92), (self.nav_x+margin_left+lenghtold-padding_left+space,self.nav_y+margin_top-padding_top), (self.nav_x+margin_left+lenght+padding_right+space,self.nav_y+margin_top-padding_top), 1)\r\n pygame.draw.line(self.display, (92,92,92), (self.nav_x+margin_left+lenghtold-padding_left+space,self.nav_y+margin_top+height), (self.nav_x+margin_left+lenght+padding_right+space,self.nav_y+margin_top+height), 1)\r\n pygame.draw.line(self.display, (92,92,92), (self.nav_x+margin_left+lenghtold-padding_left+space,self.nav_y+margin_top-padding_top), (self.nav_x+margin_left+lenghtold-padding_left+space,self.nav_y+margin_top+height), 1)\r\n pygame.draw.line(self.display, (92,92,92), (self.nav_x+margin_left+lenght+padding_right+space,self.nav_y+margin_top-padding_top), (self.nav_x+margin_left+lenght+padding_right+space,self.nav_y+margin_top+height), 1)\r\n \r\n def lisdir(self, path):\r\n try:\r\n if os.path.isdir(path):\r\n self.lsdir = os.listdir(path)\r\n else:\r\n os.system('\"' + path[:-1] + '\"')\r\n return False\r\n except PermissionError as err:\r\n self.err_msg(err.strerror)\r\n return False\r\n else:\r\n self.path = path\r\n self.scrl = 0\r\n return True\r\n \r\n def nav_backward(self):\r\n if self.exp_b_history != []:\r\n self.exp_f_history.append(self.path)\r\n self.list_cons(self.exp_b_history.pop(), True)\r\n \r\n def nav_forward(self):\r\n if self.exp_f_history != []:\r\n self.exp_b_history.append(self.path) \r\n self.list_cons(self.exp_f_history.pop(), True)\r\n \r\n def nav_up(self):\r\n pth = self.path[:self.path.rfind(\"/\",0,-1)+1]\r\n if not 
len(pth) < 3:\r\n self.list_cons(pth)\r\n \r\n def err_msg(self, msg):\r\n self.display.fill((32,32,32))\r\n self.nav()\r\n self.display.blit(self.err_font.render(msg, False, (128, 128, 128)),(self.exp_x+48,self.exp_y+48))\r\n pygame.display.update()\r\n \r\n def tooltip(self, x, y, list_b, list_t,):\r\n pygame.draw.rect(self.display, (92,92,92), pygame.Rect(x, y, 400, 400))\r\n \r\n for idx, t in enumerate(list_b):\r\n self.display.blit(self.font.render(t, False, (128, 128, 128)),(x+20,y+(idx*24)))\r\n pygame.display.update()\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n print(\"click\")\r\n print(x,y)\r\n \r\n def loop_nav(self,event,*func):\r\n while self.is_run:\r\n self.clock.tick(60)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.is_run = False\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n x,y = event.pos\r\n if self.nav_y < y < self.nav_h + self.nav_y:\r\n if x < 54:\r\n self.nav_backward()\r\n elif x < 100:\r\n self.nav_forward()\r\n elif x < 140:\r\n self.nav_up()\r\n else:\r\n if self.spltpath_coord[0][0][0] < x < self.spltpath_coord[-1][1][0] and self.spltpath_coord[0][0][1] < y < self.spltpath_coord[-1][1][1]:\r\n for idx, coord in enumerate(self.spltpath_coord):\r\n if coord[0][0] < x < coord[1][0] and coord[0][1] < y < coord[1][1]:\r\n self.list_cons(self.nav_bar_pathlist[idx])\r\n if event.button == 3:\r\n x,y = event.pos\r\n if self.spltpath_coord[0][0][0] < x < self.spltpath_coord[-1][1][0] and self.spltpath_coord[0][0][1] < y < self.spltpath_coord[-1][1][1]:\r\n for idx, coord in enumerate(self.spltpath_coord):\r\n if coord[0][0] < x < coord[1][0] and coord[0][1] < y < coord[1][1]:\r\n self.list_cons(self.nav_bar_pathlist[idx])\r\n else:\r\n if event.button == 4:\r\n if self.scrl > 0:\r\n self.scrl -= 2\r\n self.scrol.set_pos(self.scrl*24)\r\n self.draw()\r\n elif event.button == 5:\r\n if self.scrl < (len(self.lsdir)-self.exp_len):\r\n self.scrl += 2\r\n self.scrol.set_pos(self.scrl*24)\r\n self.draw()\r\n \r\n \r\n elif event.type == pygame.MOUSEMOTION and self.scroll_mouse:\r\n if pygame.mouse.get_pressed()[0] == 1:\r\n x,y = event.rel\r\n if (self.scrol.rated_pos + (y*self.scrol.rate) + self.scrol.scroll_height) <= self.scrol.h and (self.scrol.pos + (y*self.scrol.rate)) >= 0:\r\n self.scrol.set_pos(self.scrol.pos + (y*self.scrol.rate))\r\n self.scrl = int(self.scrol.pos / 24)\r\n self.draw()\r\n else:\r\n self.scroll_mouse = False\r\n \r\n \r\n elif event.type == pygame.VIDEORESIZE:\r\n self.display = pygame.display.set_mode((event.w, event.h), self.flags_d)\r\n w = event.w\r\n h = event.h\r\n self.nav_x = 0\r\n self.nav_y = 0\r\n self.nav_w = w\r\n self.nav_h = 64\r\n self.exp_x = 0\r\n self.exp_y = 64\r\n self.exp_w = w\r\n self.exp_h = h-64\r\n self.exp_len = self.exp_h / 24\r\n self.list_cons(self.path)\r\n self.draw()\r\n \r\n def default_evts(self):\r\n self.evt_QUIT = [self.evt_quit]\r\n self.evt_MOUSEBUTTONDOWN = [self.evt_select_exp]\r\n self.evt_MOUSEMOTION = [self.evt_scroll_exp]\r\n self.evt_VIDEORESIZE = [self.evt_resize]\r\n \r\n # <----/ event /---->\r\n \r\n def evt_select_exp(self, event):\r\n if event.button == 1:\r\n x,y = event.pos\r\n if y > self.exp_y:\r\n if x > self.exp_w - 30:\r\n self.scroll_mouse = True\r\n else:\r\n if (self.m_llf+0.8) > time.time():\r\n self.m_llf = 0\r\n selelected = self.path + self.lsdir[((y-self.exp_y)//24)+self.scrl] + \"/\"\r\n self.list_cons(selelected)\r\n else:\r\n self.m_llf = time.time()\r\n 
elif self.nav_y < y < self.nav_h + self.nav_y:\r\n if x < 54:\r\n self.nav_backward()\r\n elif x < 100:\r\n self.nav_forward()\r\n elif x < 140:\r\n self.nav_up()\r\n else:\r\n if self.spltpath_coord[0][0][0] < x < self.spltpath_coord[-1][1][0] and self.spltpath_coord[0][0][1] < y < self.spltpath_coord[-1][1][1]:\r\n for idx, coord in enumerate(self.spltpath_coord):\r\n if coord[0][0] < x < coord[1][0] and coord[0][1] < y < coord[1][1]:\r\n self.list_cons(self.nav_bar_pathlist[idx])\r\n elif event.button == 2:\r\n x,y = event.pos\r\n self.tooltip(x,y,(\"bottom 1\", \"bottom 2\"), (\"top 1\", \"top 2\"))\r\n if event.button == 3:\r\n x,y = event.pos\r\n if self.spltpath_coord[0][0][0] < x < self.spltpath_coord[-1][1][0] and self.spltpath_coord[0][0][1] < y < self.spltpath_coord[-1][1][1]:\r\n for idx, coord in enumerate(self.spltpath_coord):\r\n if coord[0][0] < x < coord[1][0] and coord[0][1] < y < coord[1][1]:\r\n self.list_cons(self.nav_bar_pathlist[idx])\r\n else:\r\n if event.button == 4:\r\n if self.scrl > 0:\r\n self.scrl -= 2\r\n self.scrol.set_pos(self.scrl*24)\r\n self.draw()\r\n elif event.button == 5:\r\n if self.scrl < (len(self.lsdir)-self.exp_len):\r\n self.scrl += 2\r\n self.scrol.set_pos(self.scrl*24)\r\n self.draw()\r\n \r\n def evt_resize(self, event):\r\n self.display = pygame.display.set_mode((event.w, event.h), self.flags_d)\r\n w = event.w\r\n h = event.h\r\n self.nav_x = 0\r\n self.nav_y = 0\r\n self.nav_w = w\r\n self.nav_h = 64\r\n self.exp_x = 0\r\n self.exp_y = 64\r\n self.exp_w = w\r\n self.exp_h = h-64\r\n self.exp_len = self.exp_h / 24\r\n self.list_cons(self.path)\r\n self.draw()\r\n \r\n def evt_scroll_exp(self, event):\r\n if self.scroll_mouse:\r\n if pygame.mouse.get_pressed()[0] == 1:\r\n x,y = event.rel\r\n if (self.scrol.rated_pos + (y*self.scrol.rate) + self.scrol.scroll_height) <= self.scrol.h and (self.scrol.pos + (y*self.scrol.rate)) >= 0:\r\n self.scrol.set_pos(self.scrol.pos + (y*self.scrol.rate))\r\n self.scrl = int(self.scrol.pos / 24)\r\n self.draw()\r\n else:\r\n self.scroll_mouse = False\r\n \r\n def evt_quit(self, event):\r\n self.is_run = False\r\n\r\ntest = exp(1024, 720)\r\nquit()\r\n","sub_path":"exp_pygame.py","file_name":"exp_pygame.py","file_ext":"py","file_size_in_byte":16390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"608080902","text":"import json\nfrom config import *\nimport pymongo\nclient=pymongo.MongoClient(MONGO_URL)\ndb=client[MONGO_DB]\ntable=db[MONGO_TABLE]\n\ncomments=table.find()\nwith open('comment_update22.txt','a',encoding='utf8')as f:\n for comment in comments:\n if int(comment['date'][0:4]) >=2017:\n print(comment['text'],comment['date'])\n # f.write(json.dumps(comment['text'],ensure_ascii=False)+'\\n')\nf.close()\n\n","sub_path":"Public comment project/comment_fetch.py","file_name":"comment_fetch.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"507617841","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport odesk\nfrom tipfy import RequestHandler, Response\n\nfrom apps.odesk_auth_proxy.models import OdeskApplication\n\n\nclass IndexHandler(RequestHandler):\n def get(self):\n return Response('oDesk API auth proxy for desktop apps')\n\n\nclass SubmitKeysHandler(RequestHandler):\n def get(self, app):\n stored_app = OdeskApplication.get_by_key_name(app)\n if not stored_app:\n r = Response(\n 'There is no app %s registered in oDesk Auth Proxy' % app)\n r.status_code = 404\n return r\n client = odesk.Client(stored_app.public_key, stored_app.secret_key)\n return Response(client.auth.auth_url())\n\n\n def post(self, app):\n public = self.request.form.get('public')\n secret = self.request.form.get('secret')\n if not public and not secret:\n r = Response('Bad request: both public and secret is required')\n r.status_code = 400\n return r\n stored_app = OdeskApplication.get_by_key_name(app)\n if not stored_app:\n stored_app = OdeskApplication(key_name=app)\n stored_app.created = datetime.datetime.utcnow()\n stored_app.public_key = public\n stored_app.secret_key = secret\n stored_app.updated = datetime.datetime.utcnow()\n\n stored_app.put()\n return Response('Ok')\n\n\nclass AuthCallbackHandler(RequestHandler):\n def get(self, app):\n frob = self.request.args.get('frob')\n if not frob:\n r = Response(\"Missed required param frob\")\n r.status_code = 400\n return r\n\n stored_app = OdeskApplication.get_by_key_name(app)\n if not stored_app:\n r = Response(\n 'There is no app %s registered in oDesk Auth Proxy' % app)\n r.status_code = 404\n return r\n client = odesk.Client(stored_app.public_key, stored_app.secret_key)\n auth_token, user = client.auth.get_token(frob)\n return Response(auth_token)\n","sub_path":"00-timelog-rework-sprint/odesk-auth-proxy/app/apps/odesk_auth_proxy/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"263995171","text":"def main():\n n = int(input())\n\n card_price = [int(i) for i in input().split(' ')]\n dp = [0 for i in range(n+1)]\n card_price.insert(0, 0)\n for i in range(1, n+1):\n for j in range(1, i+1):\n dp[i] = max(dp[i], dp[i-j] + card_price[j])\n\n print(dp[n])\n\nmain()\n\n","sub_path":"lgy/algorithm/2743 단어 길이 재기.py","file_name":"2743 단어 길이 재기.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"150881888","text":"from django.db import models\n\n# Create your models here.\nfrom common.models import Base\nfrom apps.devices.models import Devices\n\n\nclass Contacts(Base):\n id = models.AutoField(primary_key=True)\n name = models.TextField(blank=True, null=True)\n phone_number = models.TextField(blank=True, null=True)\n device = models.ForeignKey(Devices, related_name='%(app_label)s_%(class)s_fk', null=True, blank=True,\n on_delete=models.CASCADE)\n\n class Meta:\n db_table = 'contacts'\n verbose_name_plural = 'contacts'\n\n","sub_path":"apps/contacts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"111987209","text":"# NOTE :\n# This python file will only have to contain function\n# All variable should be assigned where they will\n# be use.\n\nimport os\nimport tensorflow as tf\nfrom tensorflow import image, io\nimport pandas as pd\nimport numpy as np\n\n\n# !!!\n# VARIABLE ONLY FOR TESTING PURPOSE\n# !!!\n\nlabels_length = 5\n\n\ndef load_labels(labels_path):\n \"\"\"\n :param: both darknet.labels from train or test folder are the same,\n so either path is fine.\n :output: list of string of labels\n \"\"\"\n with open(labels_path, 'r') as labels_file:\n # used splitlines instead of readlines in order to not have the /n.\n labels = labels_file.read().splitlines()\n for i in range(len(labels)):\n # Remove whitespace between the word\n labels[i] = labels[i].replace(\" \", \"\")\n return labels\n\n\ndef load_data(directory_path, selection):\n \"\"\"\n execute select_txt_jpg() and in its turn execute concat_path_to_names()\n this give us a list containing each path to each file separatly.\n\n :param: directory_path : TRAIN_DIR, TEST_DIR or VALID_DIR\n selection: \".txt\" or \".jpg\"\n\n :output: list of string of path to each indiviual jpg or txt file.\n \"\"\"\n\n def select_txt_jpg():\n \"\"\"\n Create a list containing all the filename from the directory\n and extract the jpg or txt from it.\n\n output : List of string of name from each indivual jpg or txt file.\n \"\"\"\n names_list = []\n # Iterate over a list of item coresponding to each file in the directory.\n for filename in os.listdir(directory_path):\n # Select only items finishing with the selection (.txt or .jpg)\n if filename.endswith(selection):\n names_list.append(filename)\n # Each items is a couple made of 1 txt and 1 jpg, which both hold the same\n # name. We sort them as prevention of futur error which could lead to the\n # data losing their initial index rank.\n names_list.sort()\n\n def concat_path_to_names():\n \"\"\"\n Add the the directory path to each file name in the list.\n \"\"\"\n path_list = []\n for name in names_list:\n # Add directory_path to each string in the list.\n path_list.append(f\"{directory_path}/{name}\")\n return path_list\n\n return concat_path_to_names()\n\n return select_txt_jpg()\n\n\ndef load_image(image_path):\n \"\"\"\n Convert all image into array of shape (1, 416, 416, 3)\n \"\"\"\n image = tf.io.read_file(image_path)\n image = tf.image.decode_image(image, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n\n image = tf.image.resize(image, (416, 416))\n image = image[tf.newaxis, :]\n return image\n\n\ndef one_hot_encoding(dataframe):\n \"\"\"\n :input: Pandas dataframe of boxes from 1 image in from int and float.\n :output: Dataframe with labels one_hot_encoded\n {0: x, 1: y, 2: width, 3: height, 4: labels0, ..., nlabels}\n \"\"\"\n # Create a dataframe of int coresponding of 0 to len(labels).\n labels_dataframe = pd.DataFrame((range(labels_length)), dtype='object')\n labels_dataframe = labels_dataframe.rename(columns={0: 'labels'})\n # Mixing labels_dataframe to dataframe allows dataframe to get at least\n # one example of each classes during the one hot encoding.\n dataframe = dataframe.append(labels_dataframe)\n # Create a one hot encoding of the columns labels\n one_hot = pd.get_dummies(dataframe.labels, prefix='labels')\n # Stack one_hot and dataframe togheter horizontally\n dataframe = pd.concat((dataframe, one_hot), axis=1)\n # Drop row coresponding to labels_dataframe and the columns labels.\n dataframe = dataframe.dropna().drop('labels', 
axis=1)\n return dataframe\n\n\ndef get_annotations(annotations_path):\n dataframe = pd.read_csv(annotations_path, delimiter=\" \", header=None)\n dataframe = dataframe.rename(columns={0: \"labels\",\n 1: \"x\",\n 2: \"y\",\n 3: \"width\",\n 4: \"height\"})\n dataframe = one_hot_encoding(dataframe)\n return dataframe\n\n","sub_path":"Archive/data_loading_v3.py","file_name":"data_loading_v3.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"255303299","text":"\"\"\"\nCreate a RomanNumerals helper that can convert a roman numeral to and from an integer value. The class should follow the API demonstrated in the examples below. Multiple roman numeral values will be tested for each helper method.\n\nModern Roman numerals are written by expressing each digit separately starting with the left most digit and skipping any digit with a value of zero. In Roman numerals 1990 is rendered: 1000=M, 900=CM, 90=XC; resulting in MCMXC. 2008 is written as 2000=MM, 8=VIII; or MMVIII. 1666 uses each Roman symbol in descending order: MDCLXVI.\n\nExamples:\n\nRomanNumerals.to_roman(1000) # should return 'M'\nRomanNumerals.from_roman('M') # should return 1000\n\"\"\"\n\nclass RomanNumerals():\n @staticmethod\n def to_roman(digit):\n digit_dict = {0:'', 1:'I', 2:'II', 3:'III', 4:'IV', 5:'V', 6:'VI', 7:'VII', 8:'VIII', 9:'IX',\n 10:'X', 20:'XX', 30:'XXX', 40:'XL', 50:'L', 60:'LX', 70:'LXX', 80:'LXXX', 90:'XC',\n 100:'C', 200:'CC', 300:'CCC', 400:'CD', 500:'D', 600:'DC', 700:'DCC', 800:'DCCC', 900:'CM',\n 1000:'M', 2000:'MM', 3000:'MMM'}\n\n digits = []\n romans = []\n for num, dig in enumerate(list(str(digit))[::-1]):\n digits.append(int(dig)*10**num)\n for ch in digits[::-1]:\n romans.append(digit_dict.get(ch))\n return ''.join(romans)\n\n @staticmethod\n def from_roman(symvol):\n roman_dict = {'I':1, 'II':2, 'III':3, 'IV':4, 'V':5, 'VI':6, 'VII':7, 'VIII':8, 'IX':9,\n 'X':10, 'XX':20, 'XXX':30, 'XL':40, 'L':50, 'LX':60, 'LXX':70, 'LXXX':80, 'XC':90,\n 'C':100, 'CC':200, 'CCC':300, 'CD':400, 'D':500, 'DC':600, 'DCC':700, 'DCCC':800, 'CM':900,\n 'M':1000, 'MM':2000, 'MMM':3000}\n romans = ['MMM', 'MM', 'M', 'CM', 'DCCC', 'DCC', 'DC', 'CD', 'D', 'CCC', 'CC', 'XC', 'C', 'LXXX', 'LXX', 'LX', 'XL', 'L', 'XXX', 'XX', 'IX', 'X', 'VIII', 'VII', 'VI', 'IV', 'V', 'III', 'II', 'I']\n result = 0\n for ch in romans:\n if symvol.find(ch) != -1:\n symvol = symvol.replace(ch, '')\n result += roman_dict.get(ch)\n return result\n\nif __name__ == '__main__':\n print(RomanNumerals.from_roman('M'), 1000)\n print(RomanNumerals.to_roman(1000), 'M')\n print(RomanNumerals.to_roman(1666), 'MDCLXVI')\n print(RomanNumerals.from_roman('MDCLXVI'), 1666)\n print(RomanNumerals.from_roman('IV'), 4)","sub_path":"Kata/Roman Numerals Helper.py","file_name":"Roman Numerals Helper.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"80714957","text":"from images_query_interface import db, bcrypt, login_manager\nimport os\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom images_query_interface.common_utils import boundry_points\nimport datetime\nfrom dateutil.parser import parse\nimport numpy as np\nfrom flask_login import UserMixin\nimport sys\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\n# Image table in the database\nclass Image(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n date_observed = db.Column(db.DateTime, unique=True, nullable=False)\n jd = db.Column(db.Float, unique=True, nullable=False)\n\n filter_used = db.Column(db.String(20))\n exposure = db.Column(db.Float)\n air_mass = db.Column(db.Float)\n ccd_temp = db.Column(db.Float)\n image_type = db.Column(db.String(20)) \n focus_value = db.Column(db.String(20))\n fwhm = db.Column(db.Float)\n lim_mag = db.Column(db.Float)\n psf_mag = db.Column(db.Float)\n psf_merr = db.Column(db.Float)\n apr_mag = db.Column(db.Float)\n apr_merr = db.Column(db.Float)\n\n filepath = db.Column(db.String(120), unique=True, nullable=False)\n\n tel_alt = db.Column(db.Float)\n tel_az = db.Column(db.Float)\n\n ref_ra = db.Column(db.Float)\n ref_dec = db.Column(db.Float)\n\n tar_ra = db.Column(db.Float)\n tar_dec = db.Column(db.Float)\n tar_name = db.Column(db.String(20))\n\n boundry_points = db.Column(db.String(120))\n\n def __repr__(self):\n attrs = vars(self)\n for index,vals in attrs.items():\n if not index.startswith('__') and attrs[index]==None:\n attrs[index] = float(\"Nan\")\n return ', '.join(\"{}\".format(item) for item in attrs.items())\n \n\n# User table in the database\nclass User(db.Model, UserMixin):\n __bind_key__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(20), unique=True, nullable=False)\n email = db.Column(db.String(120), unique=True, nullable=False)\n password = db.Column(db.String(60), nullable=False)\n history = db.Column(db.String(240))\n\n def __repr__(self):\n return \"User({self.username}, {self.email})\".format()\n \n\ndef read_header(key,header_dict):\n try:\n return header_dict[key]\n except KeyError:\n return None\n\n\ndef remove_root(filepath):\n print(filepath)\n temp = filepath.replace('/mnt/growth/growth_data', '')\n temp = temp.replace('/home/growth', '')\n print(temp)\n return temp\n\n\n# takes fits filepath as input and returns an instance of the Image class defined above\ndef file_to_Image_obj(fits_image_filename):\n \n hdul = fits.open(fits_image_filename)\n hdr = hdul[0].header\n (y_end,x_end) = hdul[0].data.shape \n hdul.close()\n wcs = WCS(hdr)\n \n n_of_div = 5\n\n this_image = Image(\n date_observed = parse(read_header('DATE-OBS',hdr)),\n jd = read_header('JD',hdr),\n\n filter_used = read_header('FILTER',hdr),\n exposure = read_header('EXPOSURE',hdr),\n air_mass = read_header('AIRMASS',hdr),\n ccd_temp = read_header('CCD_TEMP',hdr),\n image_type = read_header('IMAGETYP',hdr),\n focus_value = read_header('FOCUSER',hdr),\n fwhm = read_header('FWHM', hdr),\n lim_mag = read_header('LIM_MAG', hdr),\n\n psf_mag = read_header('PSF_mag', hdr),\n psf_merr = read_header('PSF_merr', hdr),\n apr_mag = read_header('Apr_mag', hdr),\n apr_merr = read_header('Apr_merr', hdr),\n\n filepath = (fits_image_filename) ,\n\n tel_alt = read_header('TEL_ALT',hdr),\n tel_az = read_header('TEL_AZ',hdr),\n\n ref_ra = read_header('CRVAL1',hdr),\n ref_dec = read_header('CRVAL2',hdr),\n\n tar_ra = read_header('TARRA',hdr),\n tar_dec = 
read_header('TARDEC',hdr),\n tar_name = read_header('OBJECT',hdr),\n\n boundry_points = boundry_points(x_end,y_end,wcs,n_of_div)\n )\n \n return this_image\n\n# takes as input directory path and adds all fits files (.fits extension) to the db\ndef add_dir_to_db(dirpath, append=True):\n print('Adding Images to Database')\n if not append:\n db.drop_all(bind=None)\n db.create_all(bind=None)\n \n for dirpath, dirnames, filenames in os.walk(dirpath):\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n #changed to proc.fits for demo\n if filepath.endswith('.fits'):\n try :\n this_image = file_to_Image_obj(filepath)\n print(filepath, this_image.date_observed)\n db.session.add(this_image)\n except Exception as e :\n with open(\"error_report.txt\",\"a\") as logf:\n logf.write(\"Failed to make db object {0}: {1}\\n\".format(filepath, str(e)))\n print (str(e))\n \n \n db.session.commit()\n\ndef add_user_to_db(username, email, password):\n print('Adding User')\n user = User(username=username, email=email, password=password)\n db.session.add(user)\n db.session.commit()","sub_path":"images_query_interface/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"442458891","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module runs tests for the KPI Calculator. \n\n\"\"\"\n\nimport unittest\nimport os\nimport pandas as pd\nimport utilities\nfrom collections import OrderedDict\nfrom kpis.kpi_calculator import KPI_Calculator\nfrom testcase import TestCase\n\ntesting_root_dir = os.path.join(utilities.get_root_path(), 'testing')\n\ntdis_tot_ref = 6.0427772787153575\nener_tot_ref = 147.133983413\ncost_tot_ref = 29.426796682679161\nemis_tot_ref = 73.566991706697891\ntime_rat_ref = 2.778238720364041e-07\n\ntdis_dict_ref = OrderedDict([('TRooAir_dTlower_y', 5.1757106813714691), \n ('TRooAir_dTupper_y', 0.86706659733940084)])\nener_dict_ref = OrderedDict([('PCoo_y', 2.5666768015020454), \n ('PFan_y', 1.2181399139428837), \n ('PHea_y', 143.31369783282616), \n ('PPum_y', 0.035468865124696727)])\ncost_dict_ref = OrderedDict([('PCoo_y', 0.51333536030040894), \n ('PFan_y', 0.24362798278857672), \n ('PHea_y', 28.662739566565236), \n ('PPum_y', 0.0070937730249393451)])\nemis_dict_ref = OrderedDict([('PCoo_y', 1.2833384007510227), \n ('PFan_y', 0.60906995697144184), \n ('PHea_y', 71.656848916413082), \n ('PPum_y', 0.017734432562348364)])\n\nclass KpiCalculatorTest(unittest.TestCase):\n '''Tests the KPI Calculator class\n \n '''\n \n def setUp(self):\n '''Setup for each test.\n \n '''\n \n self.case=TestCase()\n \n # Instantiate a KPI calculator linked to an empty case\n self.cal = KPI_Calculator(self.case)\n \n # Read the reference data\n ref_filepath = os.path.join(utilities.get_root_path(), \n 'testing', 'references', 'kpis', 'tc2_results.csv')\n df = pd.read_csv(ref_filepath)\n \n # Fill the test case with the refernce data\n for var in df.keys():\n # Assign time\n if var=='time':\n self.case.y_store[var] = df.loc[:,var]\n # Assign inputs\n elif var.endswith('_u'):\n self.case.u_store[var] = df.loc[:,var]\n # Assign outputs\n elif var.endswith('_y'):\n self.case.y_store[var] = df.loc[:,var]\n \n def test_get_thermal_discomfort(self):\n '''Uses the KPI calculator to calculate the thermal discomfort \n and compares with references.\n \n '''\n \n # Calculate thermal discomfort\n self.cal.get_thermal_discomfort()\n self.assertAlmostEqual(self.case.tdis_tot, tdis_tot_ref, places=3)\n self.assertDictEqual(self.case.tdis_dict, tdis_dict_ref)\n \n def test_get_energy(self):\n '''Uses the KPI calculator to calculate the energy use\n and compares with references.\n \n '''\n \n # Calculate thermal discomfort\n self.cal.get_energy()\n \n # Compare with references\n self.assertAlmostEqual(self.case.ener_tot, ener_tot_ref, places=3)\n self.assertDictEqual(self.case.ener_dict, ener_dict_ref)\n \n def test_get_cost(self):\n '''Uses the KPI calculator to calculate the operational cost\n and compares with references.\n \n '''\n \n # Calculate operational cost\n self.cal.get_cost()\n \n # Compare with references\n self.assertAlmostEqual(self.case.cost_tot, cost_tot_ref, places=3)\n self.assertDictEqual(self.case.cost_dict, cost_dict_ref)\n \n def test_get_emissions(self):\n '''Uses the KPI calculator to calculate the emissions\n and compares with references.\n \n '''\n \n # Calculate emissions\n self.cal.get_emissions()\n \n # Compare with references\n self.assertAlmostEqual(self.case.emis_tot, emis_tot_ref, places=3)\n self.assertDictEqual(self.case.emis_dict, emis_dict_ref)\n \n def test_get_computational_time_ratio(self):\n '''Uses the KPI calculator to calculate the computational time ratio\n and compares with references.\n \n '''\n \n # Reset test-case\n 
self.case.reset()\n \n # Advance three simulation steps to compute elapsed times\n for _ in range(3):\n self.case.advance(u={})\n \n # Calculate computational time ratio\n self.cal.get_computational_time_ratio()\n \n # Compare with references\n self.assertAlmostEqual(self.case.time_rat, time_rat_ref, places=3)\n \nif __name__ == '__main__':\n utilities.run_tests(os.path.basename(__file__))","sub_path":"testing/test_kpis.py","file_name":"test_kpis.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"258871173","text":"import csv\r\n\r\n\r\n\r\n# As described in https://www.rabobank.nl/images/formaatbeschrijving_csv_kommagescheiden_nieuw_29539176.pdf\r\ncsv_fieldnames = [\r\n \"account\", #REKENINGNUMMER_REKENINGHOUDER\r\n \"currency\", #MUNTSOORT\r\n \"interest_date\", #RENTEDATUM\r\n \"transaction_class\", #BY_AF_CODE\r\n \"amount\", #BEDRAG\r\n \"contra_account\", #TEGENREKENING\r\n \"to_name\", #NAAR_NAAM\r\n \"transaction_date\", #BOEKDATUM\r\n \"transaction_type\", #BOEKCODE\r\n \"\", #FILLER\r\n \"description1\", #OMSCHR1\r\n \"description2\", #OMSCHR2\r\n \"description3\", #OMSCHR3\r\n \"description4\", #OMSCHR4\r\n \"description5\", #OMSCHR5\r\n \"description6\", #OMSCHR6\r\n \"SEPA_ID\", #END_TO_END_ID\r\n \"SEPA_contra\", #ID_TEGENREKENINGHOUDER\r\n \"SEPA_mandate\" #MANDAAT_ID\r\n ]\r\n\r\ndef importer(filename):\r\n\r\n \"\"\"\r\n Import .csv file with transaction data\r\n \"\"\"\r\n\r\n transaction_data = open(filename, encoding='utf-8')\r\n\r\n transaction_dict = csv.DictReader(transaction_data, fieldnames=csv_fieldnames)\r\n\r\n return transaction_dict\r\n\r\ndef qif_writer(transaction_dict):\r\n \"\"\"\r\n \"\"\"\r\n\r\n\r\n\r\n writestring = \"\"\"\r\nD{transaction_date}\r\nT{amount}\r\nP{contra_account} {to_name} {description}\r\n^\r\n\r\n \"\"\".format(**transaction_dict)\r\n\r\n return writestring\r\n\r\n\r\ndef qif_return(filename):\r\n\r\n with open('static/transactions.qif', 'w', encoding='utf-8') as outfile:\r\n outfile.write(\"!Type:Bank\")\r\n outfile.write(\"\\n\\n\")\r\n\r\n for transaction in importer(filename):\r\n\r\n if transaction[\"transaction_class\"] == \"D\": #debet\r\n transaction[\"amount\"] = \"-\" + transaction[\"amount\"]\r\n\r\n transaction[\"description\"] = \" \".join([transaction[\"description1\"],\r\n transaction[\"description2\"],\r\n transaction[\"description3\"],\r\n transaction[\"description4\"],\r\n transaction[\"description5\"],\r\n transaction[\"description6\"]]).strip()\r\n\r\n writestring = qif_writer(transaction)\r\n outfile.write(writestring)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n importer(\"transactions.txt\")\r\n\r\n with open('output.qif', 'w', encoding='utf-8') as outfile:\r\n outfile.write(\"!Type:Bank\")\r\n outfile.write(\"\\n\\n\")\r\n\r\n for transaction in importer(\"transactions.txt\"):\r\n\r\n if transaction[\"transaction_class\"] == \"D\": #debet\r\n transaction[\"amount\"] = \"-\" + transaction[\"amount\"]\r\n\r\n transaction[\"description\"] = \" \".join([transaction[\"description1\"],\r\n transaction[\"description2\"],\r\n transaction[\"description3\"],\r\n transaction[\"description4\"],\r\n transaction[\"description5\"],\r\n transaction[\"description6\"]]).strip().strip()\r\n\r\n writestring = qif_writer(transaction)\r\n outfile.write(writestring)\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"303123900","text":"import numpy as np\nfrom numpy.random import randint as rng\n\n# from agent import Agent\n\nfrom kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.image import Image\nfrom kivy.utils import get_color_from_hex\n\ncard_size = (72, 96)\n\nsuits = ('C', 'H', 'D', 'S')\nranks = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')\nvalues = { 'A':1, '2':2, '3':3, '4':4,\n '5':5, '6':6, '7':7, '8':8,\n '9':9, 'T':10, 'J':10, 'Q':10, 'K':10\n }\n\n\nclass Card(object):\n\n def __init__(self, suit, rank):\n if (suit in suits) and (rank in ranks):\n self.suit = suit\n self.rank = rank\n else:\n raise TypeError('Invalid suit or rank.')\n\n def __str__(self):\n return self.suit + self.rank\n\n def get_suit(self):\n return self.suit\n\n def get_rank(self):\n return self.rank\n\n\nclass Hand(object):\n\n def __init__(self):\n self.hand = []\n\n def add_card(self, card):\n self.hand.append(card)\n return self.hand\n\n def get_value(self):\n value = 0\n for card in self.hand:\n rank = card.get_rank()\n value = value + values[rank]\n for card in self.hand:\n rank = card.get_rank()\n if rank == 'A' and value <= 11:\n value += 10\n return value\n\n def clear(self):\n pass\n\n\nclass Deck(object):\n\n def __init__(self):\n popped = list()\n self.deck = [Card(suit, rank) for suit in suits for rank in ranks]\n self.shuffle()\n\n def __str__(self):\n s = ''\n for c in self.deck:\n s = s + str(c) + ' '\n return s\n\n def shuffle(self):\n np.random.shuffle(self.deck)\n\n def deal_card(self):\n popped = self.deck.pop(0)\n return popped\n\n def reshuffle(self):\n popped = list()\n self.deck = [Card(suit, rank) for suit in suits for rank in ranks]\n self.shuffle()\n\n\nclass Agent(object):\n\n def __init__(self):\n self.hand = Hand()\n\n def learning_rate(self, epsilon, trials=0):\n pass\n\n def controller(self):\n pass\n\nclass Bank(object):\n\n def __init__(self):\n self.hand = Hand()\n\n\nclass Game(FloatLayout):\n hex = get_color_from_hex('#186932')\n h = 140\n w = -140\n a = 18\n ante = 1\n score = 0\n deck = Deck()\n agent = Agent()\n bank = Bank()\n\n def play(self):\n pass\n\n def deal(self):\n self.score -= self.ante\n self.shoe = self.deck.reshuffle()\n self.agent.hand.add_card(self.deck.deal_card())\n self.bank.hand.add_card(self.deck.deal_card())\n self.agent.hand.add_card(self.deck.deal_card())\n\n def hit(self):\n self.agent.hand.add_card(self.deck.deal_card())\n if self.agent.hand.get_value() > 21:\n self.score -= 1\n\n def stand(self):\n while self.bank.hand.get_value() < 17:\n self.bank.hand.add_card(self.deck.deal_card())\n if self.bank.hand.get_value() > 21:\n self.score += 1\n elif self.bank.hand.get_value() > self.agent.hand.get_value():\n self.score -= 1\n elif self.bank.hand.get_value() < self.agent.hand.get_value():\n self.score += 1\n elif self.bank.hand.get_value() == self.agent.hand.get_value():\n self.score -= 1\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"simplebj/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"527564601","text":"\"\"\"\nQuestion 17\nPractical Worksheet 2018-19\n\"\"\"\n\nfrom random import randint\n\nideal_n = randint(1, 20)\n\nname = input(\"Hello! What is your name?\\n\")\nprint('Well, {}, I have chosen a number between 1 and 20.'.format(name))\n\nn = int(input('Take a guess\\n'))\ncnt = 1\nwhile(ideal_n != n):\n if(n < ideal_n):\n print(\"Your guess is too low.\")\n elif(n > ideal_n):\n print(\"Your guess is too high\")\n n = n = int(input('Take a guess.\\n'))\n cnt += 1\nprint(\"Good job, {}! You guessed my number in {} guesses!\".format(name, cnt))\n","sub_path":"Section A/q17.py","file_name":"q17.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"479310366","text":"#\n# 作为Tensorflow训练模型的Httpserver提供者:减少模型装载时间,及时返回结果!\n# 调用方式:http://127.0.0.1:19877/tf?imgfile=img_data/100.jpg\n#\n\n'''\n修改历史:\n1. 需要处理并发:参考https://blog.csdn.net/wangjian1204/article/details/76732337\n2. gevent中是pywsgi?不是wsgi,版本原因?\n3. 模型调用稍显麻烦,还要把所有变量和运算都定义,可否简化?\n4.\n'''\n\n\nimport os\nfrom gevent import monkey\nmonkey.patch_all()\nfrom flask import Flask, request\nfrom gevent import pywsgi\n#import gevent\nimport tensorflow as tf\n\nfrom VerifycodeImgConvert import ImagePretreatment, depoint\nfrom skimage import io, color, filters, util, data_dir, img_as_float, img_as_ubyte\nimport numpy as np\n\n\n# 初始化权重函数\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n# 初始化偏置项\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n# 定义卷积函数:\"SAME\"表全0填充,步长为1\ndef conv2d(x, w):\n return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')\n\n# 定义一个2*2的最大池化层:过滤器尺寸2x2,步长为1,全0填充\ndef max_pool_2_2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\nPARA_IN = 1200 # 输入参数调整为30x40,可计算得出\nPARA_OUT = 10\nWIDTHS = 30\nHEIGHTS = 40\n\n# 定义输入变量: 图片大小28x28\nx = tf.placeholder(\"float\", shape=[None, PARA_IN])\n\n# 定义输出变量: 10个数字\ny_ = tf.placeholder(\"float\", shape=[None, PARA_OUT])\n\n# 卷积和池化可看做图像特征的自动提取过程!\n\n# 初始化权重,第一层卷积,32的意思代表的是输出32个通道。\n# 其实,也就是设置32个卷积,每一个卷积都会对图像进行卷积操作,即每个小块提取32个特征值\n# 5x5代表过滤器尺寸,1表示当前层的深度,32代表过滤器的深度(处理后的深度)\nw_conv1 = weight_variable([5, 5, 1, 32])\n# 为何没有使用如下方法进行初始化,是通过下面sess.run(tf.initialize_all_variables())完成!!!\n# w_conv1 = tf.get_variable('weights', [5,5,1,32], initializer = tf.truncated_normal_initializer(stddev=0.1))\n\n# 初始化偏置项\nb_conv1 = bias_variable([32])\n\n# 将输入的x(黑白图片是二维神经元,RGB图片是三维神经元:均变成数据流?)转成一个4D向量,第2、3维对应图片的宽高,\n# 最后一维代表图片的颜色通道数\n# 输入的图像为灰度图,所以通道数为1,如果是RGB图,通道数为3\n# tf.reshape(x,[-1,28,28,1])的意思是将x自动转换成28*28*1的数组\n# -1的意思是代表不知道x的shape,它会按照后面的设置进行转换\n# x_image = tf.reshape(x, [-1, 28, 28, 1])\nx_image = tf.reshape(x, [-1, WIDTHS, HEIGHTS, 1])\n\n# conf2d卷积并激活:卷积层节点的输入只是上一层的部分节点(通常3x3或5x5),连接参数将比全连接方式大副减少。\n# 处理后的节点矩阵将变得更深!\n# relu为ReLU激活函数:实现去线性化\n# 输入:28x28x1 输出:28x28x32矩阵\nh_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)\n\n# 池化:缩小矩阵的大小,但不会改变三位矩阵的深度,相当于降低分辨率。\n# 目的是减少最后全连接层中的参数,在加快计算速度的同时可防止过拟合问题。\n# 采用最大池化层(max pooling),不是节点的加权和,采用更加简单的最大值。\n# 池化后的大小确定:过滤器即池化视野为2x2,strides为1(见上)步长,因此大小减半\n# (输入:28x28x32矩阵 输出:14x14x32)\n# 输入:30x40x32矩阵 输出:15x20x32\nh_pool1 = max_pool_2_2(h_conv1)\n\n# 第二层卷积:\n# 初始权重:过滤器尺寸5x5,从当前层深度32(上一层的输出)处理到64\nw_conv2 = weight_variable([5, 5, 32, 64])\n\n# 初始化偏置项\nb_conv2 = bias_variable([64])\n\n# 将第一层卷积池化后的结果作为第二层卷积的输入\n# 输入:15x20x32 输出:15x20x64\nh_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)\n\n# 池化:15x20x64 输出:8x10x64 (全0填充,核实是否为8???)\nh_pool2 = max_pool_2_2(h_conv2)\n\n# 设置全连接层的权重:1024是与下面的全连接参数对应,7x7x64是上面的输出?\n# w_fc1 = weight_variable([7 * 7 * 64, 1024])\nw_fc1 = weight_variable([8 * 10 * 64, 1024])\n\n# 设置全连接层的偏置\nb_fc1 = bias_variable([1024])\n\n# 将第二层卷积池化后的结果,转成一个8*10*64的数组(本身不就是吗?目的是转4维?)\nh_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 10 * 64])\n\n# 通过全连接之后并激活\n# matmul是矩阵相乘?\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1)+ b_fc1)\n\n# 防止过拟合: dropout在训练时随机将部分节点的输出改为0\nkeep_prob = tf.placeholder(\"float\")\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n# 输出层\nw_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\n\n# Softmax层:用于分类,得到属于不同种类的概率分布情况\ny_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)\n\n# 
声明Saver类用于保存模型\nsaver = tf.train.Saver()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\" #不使用GPU\na = tf.placeholder(tf.int32, shape=(), name=\"input\")\nasquare = tf.multiply(a, a, name=\"output\")\nsess = tf.Session() # 创建tensorflow session,也可以在这里载入tensorflow模型\n\n# 恢复模型:\nsaver.restore(sess, \"model/verifycode_cnn_model.ckpt\")\n\ndef predicting_four(imgfile):\n # 预处理:\n image = ImagePretreatment(imgfile)\n\n # 降噪:\n depoint(image)\n\n image = img_as_ubyte(image) # 降噪后尾int32, 需要先转换为int8,否则转换为float时有问题\n image = img_as_float(image, force_copy=True)\n d = np.hsplit(image, 4) # 分成4列\n image = np.vstack((d[0], d[1], d[2], d[3])) # 垂直方向进行组合\n data = image.reshape((4, 1200)) # 转换为要求的4个输入,每个输入拉长为1200\n\n y = sess.run(y_conv, feed_dict={x: data, y_: np.zeros((4, 10)), keep_prob: 1.0})\n #print(y)\n d0 = list(y[0])\n d1 = list(y[1])\n d2 = list(y[2])\n d3 = list(y[3])\n\n #number = d0.index(max(d0))*1000 + d1.index(max(d1))*100 + d2.index(max(d2))*10 + d3.index(max(d3))\n s_number = \"%(first)d%(second)d%(three)d%(four)d\"%{\"first\":d0.index(max(d0)), \"second\":d1.index(max(d1)),\\\n \"three\":d2.index(max(d2)), \"four\":d3.index(max(d3))}\n\n print(\"预测结果为:\", s_number)\n\n return s_number\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return 'Hello World'\n\n@app.route('/hello')\ndef response_request():\n num = request.args.get('num')\n for i in range (100):\n ret = sess.run([asquare], feed_dict={a: num}) #运行tensorflow模型\n return str(ret)\n\n@app.route('/tf')\ndef response_predict():\n imgfile = request.args.get('imgfile')\n print(imgfile)\n number = predicting_four(imgfile)\n #for i in range (100):\n # ret = sess.run([asquare], feed_dict={a: num}) #运行tensorflow模型\n return number\n\nif __name__ == \"__main__\":\n server = pywsgi.WSGIServer(('127.0.0.1', 19877), app)\n print(\"\\nFlaskHttpServer已启动 .....\")\n server.serve_forever()","sub_path":"FlaskHttpServer.py","file_name":"FlaskHttpServer.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"331554103","text":"import os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom odin.utils.net_utils import download_and_extract\n\n\n# ===========================================================================\n# Helpers\n# ===========================================================================\ndef _partition(part, train=None, valid=None, test=None, unlabeled=None):\n r\"\"\" A function for automatically select the right data partition \"\"\"\n part = str(part).lower().strip()\n if 'train' in part:\n ret = train\n elif 'valid' in part:\n ret = valid\n elif 'test' in part:\n ret = test\n elif 'unlabeled' in part:\n ret = unlabeled\n else:\n raise ValueError(\"No support for partition with name: '%s'\" % part)\n if ret is None:\n raise ValueError(\"No data for parition with name: '%s'\" % part)\n return ret\n\n\nclass ImageDataset:\n\n def sample_images(self,\n save_path=None,\n dpi=80,\n n_samples=25,\n return_labels=False,\n seed=1):\n r\"\"\" Sample a subset of image from training set \"\"\"\n # TODO: return_labels\n n = int(np.sqrt(n_samples))\n assert n * n == n_samples, \"Sqrt of n_samples is not an integer\"\n train = self.create_dataset(batch_size=n_samples,\n partition='train',\n inc_labels=False)\n images = [x for x in train.take(10)]\n rand = np.random.RandomState(seed=seed)\n images = images[rand.choice(10)].numpy()\n # plot and save the figure\n if save_path is not None:\n plot_images = images\n if plot_images.shape[-1] == 1:\n plot_images = np.squeeze(plot_images, axis=-1)\n from matplotlib import pyplot as plt\n fig = plt.figure(figsize=(16, 16))\n for i in range(n_samples):\n plt.subplot(n, n, i + 1)\n img = plot_images[i]\n plt.imshow(img)\n plt.axis('off')\n plt.tight_layout()\n fig.savefig(save_path, dpi=int(dpi))\n plt.close(fig)\n return images\n\n def normalize_255(self, image):\n return tf.clip_by_value(image / 255., 1e-6, 1. - 1e-6)\n\n @property\n def n_labels(self):\n return len(self.labels)\n\n @property\n def labels(self):\n return np.array([])\n\n @property\n def shape(self):\n raise NotImplementedError()\n\n @property\n def is_binary(self):\n raise NotImplementedError()\n\n def create_dataset(self,\n batch_size=64,\n drop_remainder=False,\n shuffle=1000,\n prefetch=tf.data.experimental.AUTOTUNE,\n cache='',\n parallel=None,\n partition='train',\n inc_labels=False,\n seed=1) -> tf.data.Dataset:\n raise NotImplementedError()\n\n\n# ===========================================================================\n# Dataset\n# ===========================================================================\nclass BinarizedMNIST(ImageDataset):\n r\"\"\" BinarizedMNIST \"\"\"\n\n def __init__(self):\n import tensorflow_datasets as tfds\n self.train, self.valid, self.test = tfds.load(\n name='binarized_mnist',\n split=['train', 'validation', 'test'],\n as_supervised=False)\n\n @property\n def is_binary(self):\n return True\n\n @property\n def shape(self):\n return (28, 28, 1)\n\n def create_dataset(self,\n batch_size=64,\n drop_remainder=False,\n shuffle=1000,\n prefetch=tf.data.experimental.AUTOTUNE,\n cache='',\n parallel=None,\n partition='train',\n inc_labels=False,\n seed=1) -> tf.data.Dataset:\n r\"\"\"\n Arguments:\n partition : {'train', 'valid', 'test'}\n inc_labels : a Boolean or Scalar. 
If True, return both image and label,\n otherwise, only image is returned.\n If a scalar is provided, it indicate the percent of labelled data\n in the mask.\n\n Return :\n tensorflow.data.Dataset :\n image - `(tf.float32, (None, 28, 28, 1))`\n label - `(tf.float32, (None, 10))`\n mask - `(tf.bool, (None, 1))` if 0. < inc_labels < 1.\n where, `mask=1` mean labelled data, and `mask=0` for unlabelled data\n \"\"\"\n ds = _partition(partition,\n train=self.train,\n valid=self.valid,\n test=self.test)\n struct = tf.data.experimental.get_structure(ds)\n if len(struct) == 1:\n inc_labels = False\n ids = tf.range(self.n_labels, dtype=tf.float32)\n inc_labels = float(inc_labels)\n gen = tf.random.experimental.Generator.from_seed(seed=seed)\n\n def _process_dict(data):\n image = tf.cast(data['image'], tf.float32)\n if not self.is_binary:\n image = self.normalize_255(image)\n if inc_labels:\n label = tf.cast(data['label'], tf.float32)\n if len(label.shape) == 0: # covert to one-hot\n label = tf.cast(ids == label, tf.float32)\n if 0. < inc_labels < 1.: # semi-supervised mask\n mask = gen.uniform(shape=(1,)) < inc_labels\n return dict(inputs=(image, label), mask=mask)\n return image, label\n return image\n\n def _process_tuple(*data):\n image = tf.cast(data[0], tf.float32)\n if not self.is_binary:\n image = self.normalize_255(image)\n if inc_labels:\n label = tf.cast(data[1], tf.float32)\n if len(label.shape) == 0: # covert to one-hot\n label = tf.cast(ids == label, tf.float32)\n if 0. < inc_labels < 1.: # semi-supervised mask\n mask = gen.uniform(shape=(1,)) < inc_labels\n return dict(inputs=(image, label), mask=mask)\n return image, label\n return image\n\n ds = ds.map(_process_dict if isinstance(struct, dict) else _process_tuple,\n parallel)\n if cache is not None:\n ds = ds.cache(str(cache))\n # shuffle must be called after cache\n if shuffle is not None:\n ds = ds.shuffle(int(shuffle))\n ds = ds.batch(batch_size, drop_remainder)\n if prefetch is not None:\n ds = ds.prefetch(prefetch)\n return ds\n\n\nclass MNIST(BinarizedMNIST):\n r\"\"\" MNIST \"\"\"\n\n def __init__(self):\n import tensorflow_datasets as tfds\n self.train, self.valid, self.test = tfds.load(\n name='mnist',\n split=['train[:90%]', 'train[90%:]', 'test'],\n shuffle_files=True,\n as_supervised=True)\n\n @property\n def labels(self):\n return np.array([str(i) for i in range(10)])\n\n @property\n def is_binary(self):\n return False\n\n @property\n def shape(self):\n return (28, 28, 1)\n\n\nclass BinarizedAlphaDigits(BinarizedMNIST):\n r\"\"\" Binary 20x16 digits of '0' through '9' and capital 'A' through 'Z'.\n 39 examples of each class. \"\"\"\n\n def __init__(self):\n import tensorflow_datasets as tfds\n self.train, self.valid, self.test = tfds.load(\n name='binary_alpha_digits',\n split=['train[:70%]', 'train[70%:80%]', 'train[80%:]'],\n as_supervised=True,\n shuffle_files=True,\n )\n\n @property\n def shape(self):\n return (20, 16, 1)\n","sub_path":"odin/fuel/_image_base.py","file_name":"_image_base.py","file_ext":"py","file_size_in_byte":7131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"642739083","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 14 16:02:49 2018\n\n@author: XavierXIEXIN\n\"\"\"\nimport time\nimport ccxt\nimport requests\nimport pandas as pd\n\nEXCHANGE_DIC = {'huobipro': ccxt.huobipro(), 'okex': ccxt.okex(), 'zb': ccxt.zb()}\nHUOBI_OTC_BASE = \"https://otc-api.huobi.pro/v1/otc/trade/list/public?country=0¤cy=1&payMethod=0&currPage=1&merchant=1\"\nIDCM_OTC_BASE = \"http://apic2c.idcm.io:8304/api/Order/GetSaleOrderList?&pageIndex=0&pageSize=10&isOnline=true\"\n\ndef get_ticker_price(exchange='huobipro', direction='', base_currency='btc', quote_currency='usdt'):\n \"\"\"\n Get ticker price by exchanges, direction and trading pairs.\n \"\"\"\n if exchange == 'IDCM':\n pass\n else:\n ex = EXCHANGE_DIC[exchange]\n ex_symbol = base_currency.upper() + '/' + quote_currency.upper()\n ex_orderbook = ex.fetch_order_book(ex_symbol)\n ex_bid = ex_orderbook['bids'][0][0]\n ex_ask = ex_orderbook['asks'][0][0]\n ex_timestamp = ex_orderbook['timestamp']\n if direction =='':\n return [ex_timestamp, ex_bid, ex_ask]\n elif direction == 'buy':\n price = max(ex_bid, ex_ask)\n elif direction == 'sell':\n price = min(ex_bid, ex_ask)\n \n return price\n\ndef get_otc_price(exchange='huobipro', direction='', base_currency='usdt', quote_currency='cny'):\n \n if exchange=='huobipro':\n coinId = {'btc':1, 'eth':3, 'usdt':2}\n tradeType = {'buy':1, 'sell':0}\n url_buy = HUOBI_OTC_BASE + '&coinId=' + str(coinId[base_currency]) + '&tradeType=' + str(tradeType['buy'])\n url_sell = HUOBI_OTC_BASE + '&coinId=' + str(coinId[base_currency]) + '&tradeType=' + str(tradeType['sell'])\n buy_data = requests.get(url_buy).json()\n sell_data = requests.get(url_sell).json()\n otc_bid = sell_data['data'][0]['fixedPrice']\n otc_ask = buy_data['data'][0]['fixedPrice']\n otc_timestamp = int(time.time() * 1000)\n \n return [otc_timestamp, otc_bid, otc_ask]\n\n elif exchange=='IDCM':\n tradeSide = {'buy':1, 'sell':0}\n url_buy = IDCM_OTC_BASE + '&coinCode=' + base_currency.upper() + '¤cyCode=' + quote_currency.upper() + '&tradeSide=' + str(tradeSide['buy'])\n url_sell = IDCM_OTC_BASE + '&coinCode=' + base_currency.upper() + '¤cyCode=' + quote_currency.upper() + '&tradeSide=' + str(tradeSide['sell'])\n buy_data = requests.post(url_buy).json()\n sell_data = requests.post(url_sell).json()\n otc_bid = sell_data['Data'][0]['Price']\n otc_ask = buy_data['Data'][0]['Price']\n otc_timestamp = int(time.time() * 1000)\n \n return [otc_timestamp, otc_bid, otc_ask]\n \nif __name__ == '__main__' :\n\n btc_ticker_price = pd.DataFrame(index = ['timestamp', 'bid', 'ask'])\n for ex in EXCHANGE_DIC.keys():\n btc_ticker_price[ex] = get_ticker_price(exchange=ex)\n \n eth_ticker_price = pd.DataFrame(index = ['timestamp', 'bid', 'ask'])\n for ex in EXCHANGE_DIC.keys():\n eth_ticker_price[ex] = get_ticker_price(exchange=ex, base_currency='eth')\n \n usdt_c2c_price = pd.DataFrame(index = ['timestamp', 'bid', 'ask'])\n for ex in ['huobipro']:\n usdt_c2c_price[ex] = get_otc_price(exchange=ex)\n\n vhkd_c2c_price = pd.DataFrame(index = ['timestamp', 'bid', 'ask'])\n for ex in ['IDCM']:\n vhkd_c2c_price[ex] = get_otc_price(exchange=ex, base_currency='vhkd')\n \n btc_c2c_price = pd.DataFrame(index = ['timestamp', 'bid', 'ask'])\n for ex in ['huobipro']:\n btc_c2c_price[ex] = get_otc_price(exchange=ex, base_currency='btc')\n\n eth_c2c_price = pd.DataFrame(index = ['timestamp', 'bid', 'ask'])\n for ex in ['huobipro']:\n eth_c2c_price[ex] = get_otc_price(exchange=ex, base_currency='eth')\n\n writer = 
pd.ExcelWriter('price.xlsx')\n\n btc_ticker_price.to_excel(writer, 'btc_ticker_price')\n eth_ticker_price.to_excel(writer, 'eth_ticker_price')\n\n usdt_c2c_price.to_excel(writer, 'usdt_c2c_price')\n vhkd_c2c_price.to_excel(writer, 'vhkd_c2c_price')\n btc_c2c_price.to_excel(writer, 'btc_c2c_price')\n eth_c2c_price.to_excel(writer, 'eth_c2c_price')\n\n writer.save()\n \n ","sub_path":"price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"305560206","text":"# \"Sharpen\" an image by multiplying every pixel by 2, and then subtracting\n# the average value of the neighborhood from it.\n\n#See slide number 22 from IrfanEssa-CP-02-5-Filtering.pdf\n\n#\n# Jay Summet 2015\n#\n#Python 2.7, OpenCV 2.4.x\n#\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Load source / input image as grayscale, also works on color images...\nimgIn = cv2.imread(\"noshadow_mug_more_data_1000.png\", cv2.IMREAD_COLOR)\n\n#Create the identity filter, but with the 1 shifted to the right!\nkernel = np.zeros( (9,9), np.float32)\nkernel[4,4] = 2.0 #Identity, times two! \n\n#Create a box filter:\nboxFilter = np.ones( (9,9), np.float32) / 81.0\n\n#Subtract the two:\nkernel = kernel - boxFilter\n\n\n#Note that we are subject to overflow and underflow here...but I believe that\n# filter2D clips top and bottom ranges on the output, plus you'd need a\n# very bright or very dark pixel surrounded by the opposite type.\n\ncustom = cv2.filter2D(imgIn, -1, kernel)\n# cv2.imwrite(\"Sharpen\", custom)\ncustom = cv2.cvtColor(custom, cv2.COLOR_BGR2RGB)\nplt.figure()\nplt.title('sharpened')\nplt.imshow(custom)\n#\n#plt.title('shading')\n#plt.imshow(mask * s)\nplt.show()\n","sub_path":"evaluation/sharpen.py","file_name":"sharpen.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"37909395","text":"import locale\nimport sys,os,glob\nimport numpy\nimport matplotlib.pylab as plt\n\nfont = {'family' : 'normal',\n 'weight' : 'bold',\n 'size' : 22}\nimport matplotlib\nmatplotlib.rc('font', **font)\n\ndef readIgorTxt(f):\n flag = False\n para = ''\n for line in open(f).readlines():\n words = line.split()\n if not len(words): continue\n word = words[0]\n if word=='scale' or word=='Scale':\n flag = True\n continue\n elif word=='SLD':\n flag = False\n if flag:\n para += line\n if 'chisq' in line:\n para += '\\n'+line\n if 'Sqrt(X' in line:\n para += ' '.join(line.split()[3:])\n para += '\\n\\n'\n #return unicode(para)\n return para.decode('latin1')\n\ndef readexpfile(f):\n Q,I,E=[],[],[]\n for line in open(f).readlines():\n words=line.split()\n Q.append(locale.atof(words[0]))\n I.append(locale.atof(words[1]))\n E.append(locale.atof(words[2]))\n return numpy.array(Q),numpy.array(I)/I[0],numpy.array(E)\n\ndef readmodelfile(f):\n Q,I=[],[]\n for line in open(f).readlines():\n words=line.split()\n Q.append(locale.atof(words[0]))\n I.append(locale.atof(words[1]))\n return numpy.array(Q),numpy.array(I)/I[0]\n\n\npdb = os.getcwd().split()[-1].split('_')[-1]\nQ,Iexp,Eexp = readexpfile(glob.glob('output/*++.txt')[0])\nplt.errorbar(Q,Iexp,yerr=Eexp,color='k', fmt='o',label='Experiment (all atom)\\n')\n\nfor folder in sys.argv[1:]:\n Q,Imodel = readmodelfile(glob.glob(folder+'/*++.txt')[0])\n para = readIgorTxt(folder+'Report.txt')\n plt.plot(Q,Imodel,label='=========='+os.path.split(folder)[0].split('_')[-1]+'==========\\n'+para)\n\n\nplt.xscale('log')\nplt.yscale('log')\nplt.legend(loc='best')\nplt.xlabel('Q/cm-1')\nplt.xlim([min(Q),max(Q)])\nplt.ylabel('I(Q)')\nplt.title('IGOR curve fitting to all-atom scattering (exp) of '+pdb)\nplt.show()\n","sub_path":"katie_affine/triTori_MAB/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"248315151","text":"from __future__ import annotations\n\nimport logging\nimport typing as t\n\nfrom inflection import camelize\nfrom inflection import pluralize\nfrom sqlalchemy_utils import get_mapper\n\nfrom magql.definitions import js_camelize\nfrom magql.definitions import MagqlArgument\nfrom magql.definitions import MagqlEnumType\nfrom magql.definitions import MagqlField\nfrom magql.definitions import MagqlInputField\nfrom magql.definitions import MagqlInputObjectType\nfrom magql.definitions import MagqlInt\nfrom magql.definitions import MagqlList\nfrom magql.definitions import MagqlNonNull\nfrom magql.definitions import MagqlObjectType\nfrom magql.definitions import MagqlUnionType\nfrom magql.filter import RelFilter\nfrom magql.resolver_factory import CamelResolver\nfrom magql.resolver_factory import CheckDeleteResolver\nfrom magql.resolver_factory import CountResolver\nfrom magql.resolver_factory import CreateResolver\nfrom magql.resolver_factory import DeleteResolver\nfrom magql.resolver_factory import EnumResolver\nfrom magql.resolver_factory import ManyResolver\nfrom magql.resolver_factory import Resolver\nfrom magql.resolver_factory import ResultResolver\nfrom magql.resolver_factory import SingleResolver\nfrom magql.resolver_factory import SQLAlchemyTableUnionResolver\nfrom magql.resolver_factory import UpdateResolver\nfrom magql.type import get_magql_filter_type\nfrom magql.type import get_magql_required_type\nfrom magql.type import get_magql_type\n\n\ndef is_rel_required(rel: t.Any) -> bool:\n calc_keys = rel._calculated_foreign_keys\n fk = rel._user_defined_foreign_keys.union(calc_keys).pop()\n return not fk.nullable\n\n\n# TODO: refactor ManagerCollection so it seamlessly integrates regular\n# and table managers\nclass MagqlTableManagerCollection:\n \"\"\"\n The MagqlTableManagerCollection creates a grouping of related\n managers from the tables that are passed in, if a corresponding\n manager is not already created.\n \"\"\"\n\n def __init__(\n self,\n tables: t.Mapping[str, t.Any],\n managers: t.Optional[t.Mapping[str, t.Any]] = None,\n create_resolver: t.Type[CreateResolver] = CreateResolver,\n update_resolver: t.Type[UpdateResolver] = UpdateResolver,\n delete_resolver: t.Type[DeleteResolver] = DeleteResolver,\n single_resolver: t.Type[SingleResolver] = SingleResolver,\n many_resolver: t.Type[ManyResolver] = ManyResolver,\n ):\n \"\"\"\n Creates the managers needed to manange the Magql schema,\n if they have not already been created.\n :param tables: A list of tables to create managers for\n :param managers: A mapping of tables to pre-existing managers\n :param create_resolver: The class to use as the create resolver\n :param update_resolver: The class to use as the update resolver\n :param delete_resolver: The class to use as the delete resolver\n :param single_resolver: The class to use as the single resolver\n :param many_resolver: The class to use as the many resolver\n \"\"\"\n self.create_resolver = create_resolver\n self.update_resolver = update_resolver\n self.delete_resolver = delete_resolver\n self.single_resolver = single_resolver\n self.many_resolver = many_resolver\n\n self.manager_map = {}\n for table_name, table in tables.items():\n if managers and table_name in managers:\n manager = managers[table_name]\n else:\n manager = self.generate_manager(table)\n\n # skip tables that do not have a manager\n if manager:\n manager.to_magql()\n\n self.manager_map[table_name] = manager\n\n for _table_name, manager in self.manager_map.items():\n if manager:\n 
manager.add_rels(self.manager_map)\n\n self.magql_name_to_table: t.Dict[str, t.Any] = {}\n self.generate_check_delete()\n self.generate_pagination()\n\n def generate_check_delete(self) -> None:\n check_delete_manager = MagqlManager(\"checkDelete\")\n\n self.magql_names = []\n for _table_name, manager in self.manager_map.items():\n if manager:\n self.magql_names.append(manager.magql_name)\n\n for _table_name, manager in self.manager_map.items():\n if isinstance(manager, MagqlTableManager) and manager:\n self.magql_name_to_table[manager.magql_name] = manager.table\n\n check_delete_manager.magql_types[\"SQLAlchemyTableUnion\"] = MagqlUnionType(\n \"SQLAlchemyTableUnion\",\n self.magql_names,\n SQLAlchemyTableUnionResolver(self.magql_name_to_table),\n )\n\n check_delete_manager.query.fields[\"checkDelete\"] = MagqlField(\n MagqlList(\"SQLAlchemyTableUnion\"),\n {\n \"tableName\": MagqlArgument(\"String\"),\n \"id\": MagqlArgument(MagqlNonNull(\"ID\")),\n },\n CheckDeleteResolver(list(self.magql_name_to_table.values())),\n )\n self.manager_map[\"checkDelete\"] = check_delete_manager\n\n def generate_pagination(self) -> None:\n page_manager = MagqlManager(\"PaginationManager\")\n page_manager.magql_types[\"Page\"] = MagqlInputObjectType(\n \"Page\",\n {\n \"current\": MagqlInputField(MagqlInt()),\n \"per_page\": MagqlInputField(MagqlInt()),\n },\n )\n self.manager_map[\"PaginationManager\"] = page_manager\n\n def generate_manager(self, table: t.Any) -> t.Optional[MagqlTableManager]:\n try:\n get_mapper(table)\n except ValueError:\n logging.getLogger(__name__).warning(f\"No mapper for table {table.name!r}.\")\n return None\n return MagqlTableManager(\n table,\n create_resolver=self.create_resolver(table),\n update_resolver=self.update_resolver(table),\n delete_resolver=self.delete_resolver(table),\n single_resolver=self.single_resolver(table),\n many_resolver=self.many_resolver(table),\n )\n\n\nclass MagqlManager:\n def __init__(self, magql_name: str):\n self.query = MagqlObjectType(\"Query\")\n self.mutation = MagqlObjectType(\"Mutation\")\n self.magql_types: t.Dict[str, t.Any] = {}\n # The check delete union type resolver ( and likely more resolvers)\n # relies on the fact that the magql_name and the base object type\n # share the same name\n self.magql_name = magql_name\n\n\nclass MagqlTableManager(MagqlManager):\n \"\"\"\n The manager used to manage a single sqlalchemy table\n \"\"\"\n\n def __init__(\n self,\n table: t.Any,\n magql_name: t.Optional[str] = None,\n create_resolver: t.Optional[CreateResolver] = None,\n update_resolver: t.Optional[UpdateResolver] = None,\n delete_resolver: t.Optional[DeleteResolver] = None,\n single_resolver: t.Optional[SingleResolver] = None,\n many_resolver: t.Optional[ManyResolver] = None,\n ):\n \"\"\"\n The manager for a single sqlalchemy table.\n :param table: The table that is being managed\n :param magql_name: Optional name override for how the table is\n referred to\n :param create_resolver: Optional override for create resolver\n :param update_resolver: Optional override for update resolver\n :param delete_resolver: Optional override for delete resolver\n :param single_resolver: Optional override for single resolver\n :param many_resolver: Optional override for many resolver\n \"\"\"\n super().__init__(\n magql_name if magql_name is not None else camelize(table.name)\n ) # magql_object_name\n # Throws ValueError if it cannot find a table\n self.table_class = get_mapper(table).class_\n self.table = table\n self.table_name = table.name\n\n 
self.create_resolver = (\n create_resolver if create_resolver else CreateResolver(self.table)\n )\n self.update_resolver = (\n update_resolver if update_resolver else UpdateResolver(self.table)\n )\n self.delete_resolver = (\n delete_resolver if delete_resolver else DeleteResolver(self.table)\n )\n self.single_resolver = (\n single_resolver if single_resolver else SingleResolver(self.table)\n )\n self.many_resolver = (\n many_resolver if many_resolver else ManyResolver(self.table)\n )\n\n self.generate_magql_types()\n\n @property\n def single_query_name(self) -> str:\n if hasattr(self, \"_single_query_name_override\"):\n if callable(self._single_query_name_override):\n return t.cast(str, self._single_query_name_override())\n else:\n return self._single_query_name_override\n return js_camelize(self.table.name)\n\n @single_query_name.setter\n def single_query_name(self, value: str) -> None:\n \"\"\"\n Overrides the name of the single query to a custom value\n :param value: The name to change the single query to\n \"\"\"\n self._single_query_name_override = value\n\n @property\n def many_query_name(self) -> str:\n if hasattr(self, \"_many_query_name_override\"):\n if callable(self._many_query_name_override):\n return t.cast(str, self._many_query_name_override())\n else:\n return self._many_query_name_override\n return js_camelize(pluralize(self.table.name))\n\n @many_query_name.setter\n def many_query_name(self, value: str) -> None:\n \"\"\"\n Overrides the name of the many query to a custom value\n :param value: The name to change the many query to\n \"\"\"\n self._many_query_name_override = value\n\n @property\n def create_mutation_name(self) -> str:\n return \"create\" + self.magql_name\n\n @property\n def update_mutation_name(self) -> str:\n return \"update\" + self.magql_name\n\n @property\n def delete_mutation_name(self) -> str:\n return \"delete\" + self.magql_name\n\n def generate_create_mutation(self) -> None:\n # TODO: Move backend auth functions into manager collection\n self.create = MagqlField(\n self.magql_name + \"Payload\",\n {\"input\": MagqlArgument(MagqlNonNull(self.magql_name + \"InputRequired\"))},\n self.create_resolver,\n )\n\n def generate_update_mutation(self) -> None:\n self.update = MagqlField(\n self.magql_name + \"Payload\",\n {\n \"id\": MagqlArgument(MagqlNonNull(\"ID\")),\n \"input\": MagqlArgument(MagqlNonNull(self.magql_name + \"Input\")),\n },\n self.update_resolver,\n )\n\n def generate_delete_mutation(self) -> None:\n self.delete = MagqlField(\n self.magql_name + \"Payload\",\n {\"id\": MagqlArgument(MagqlNonNull(\"ID\"))},\n self.delete_resolver,\n )\n\n def generate_single_query(self) -> None:\n self.single = MagqlField(\n self.magql_name + \"Payload\",\n {\"id\": MagqlArgument(MagqlNonNull(\"ID\"))},\n self.single_resolver,\n )\n\n def generate_many_query(self) -> None:\n self.many = MagqlField(\n self.magql_name + \"ListPayload\",\n {\n \"filter\": MagqlArgument(self.magql_name + \"Filter\"),\n \"sort\": MagqlArgument(\n MagqlList(MagqlNonNull(self.magql_name + \"Sort\"))\n ),\n \"page\": MagqlArgument(\"Page\"),\n },\n self.many_resolver,\n )\n\n def generate_types(self) -> None:\n base = MagqlObjectType(self.magql_name)\n input = MagqlInputObjectType(self.magql_name + \"Input\")\n input_required = MagqlInputObjectType(self.magql_name + \"InputRequired\")\n filter_ = MagqlInputObjectType(self.magql_name + \"Filter\")\n sort = MagqlEnumType(self.magql_name + \"Sort\")\n\n for col_name, col in self.table.c.items():\n if col.foreign_keys:\n continue\n 
field_name = js_camelize(col_name)\n magql_type = get_magql_type(col)\n required_magql_type = get_magql_required_type(col)\n base.fields[field_name] = MagqlField(\n magql_type, None, CamelResolver()\n ) # noqa: E501\n # TODO: Organize better method of having different resolvers\n # for different fields, probably move onto magql_type\n if isinstance(magql_type, MagqlEnumType):\n base.fields[field_name].resolve = EnumResolver()\n if not col.primary_key:\n input.fields[field_name] = MagqlInputField(magql_type)\n input_required.fields[field_name] = MagqlInputField(required_magql_type)\n filter_.fields[field_name] = MagqlInputField(\n get_magql_filter_type(col, magql_type)\n )\n sort.values[field_name + \"_asc\"] = (col_name + \"_asc\",)\n sort.values[field_name + \"_desc\"] = (col_name + \"_desc\",)\n\n self.magql_types[self.magql_name] = base\n\n self.magql_types[self.magql_name + \"Input\"] = input\n self.magql_types[self.magql_name + \"InputRequired\"] = input_required\n self.magql_types[self.magql_name + \"Filter\"] = filter_\n self.magql_types[self.magql_name + \"Sort\"] = sort\n\n def generate_magql_types(self) -> None:\n self.generate_create_mutation()\n self.generate_update_mutation()\n self.generate_delete_mutation()\n self.generate_single_query()\n self.generate_many_query()\n\n self.generate_types()\n\n # Allows fields to be added directly to mutation and query\n def to_magql(self) -> None:\n if self.create:\n self.mutation.fields[self.create_mutation_name] = self.create\n if self.update:\n self.mutation.fields[self.update_mutation_name] = self.update\n if self.delete:\n self.mutation.fields[self.delete_mutation_name] = self.delete\n if self.single:\n self.query.fields[self.single_query_name] = self.single\n if self.many:\n self.query.fields[self.many_query_name] = self.many\n\n # a manager map can be passed in to give information about\n # other managers, such as an overriden name, otherwise a default is used\n def add_rels(self, managers: t.Optional[t.List[MagqlManager]] = None) -> None:\n # TODO managers shouldn't be None, no checks for it.\n try:\n table_mapper = get_mapper(self.table)\n except ValueError:\n logging.getLogger(__name__).warning(\n f\"No mapper for table {self.table.name!r}.\"\n )\n return None\n\n for rel_name, rel in table_mapper.relationships.items():\n rel_table = rel.target\n\n if rel_table.name in t.cast(t.List[MagqlManager], managers):\n rel_manager = t.cast(t.List[MagqlManager], managers)[rel_table.name]\n if rel_manager is None:\n continue\n else:\n rel_manager = None\n direction = rel.direction.name\n required = is_rel_required(rel)\n\n field_name = js_camelize(rel_name)\n\n # use magql name of rel manager if it exists else use default name\n target_name = (\n rel_manager.magql_name if rel_manager else camelize(rel.target.name)\n )\n\n base_field = target_name\n input_field_types = {\n str: \"String\",\n int: \"Int\",\n bool: \"Boolean\",\n float: \"Float\",\n }\n\n try:\n field_type = input_field_types[\n rel_table.primary_key.columns.id.type.python_type\n ]\n except KeyError:\n raise KeyError(\n \"The value set as the primary key for the relationship is not valid\"\n )\n\n input_field: t.Union[str, MagqlList] = field_type\n input_required_field: t.Union[str, MagqlList, MagqlNonNull] = field_type\n\n if \"TOMANY\" in direction:\n base_field = MagqlList(base_field)\n input_required_field = MagqlList(input_required_field)\n input_field = MagqlList(input_field)\n elif required:\n input_required_field = MagqlNonNull(input_required_field)\n\n if (\n 
field_name\n not in self.magql_types[self.magql_name + \"InputRequired\"].fields\n ):\n self.magql_types[self.magql_name + \"InputRequired\"].fields[\n field_name\n ] = MagqlInputField(input_required_field)\n if field_name not in self.magql_types[self.magql_name + \"Input\"].fields:\n self.magql_types[self.magql_name + \"Input\"].fields[\n field_name\n ] = MagqlInputField(input_field)\n if field_name not in self.magql_types[self.magql_name].fields:\n self.magql_types[self.magql_name].fields[field_name] = MagqlField(\n base_field, None, Resolver()\n )\n if field_name not in self.magql_types[self.magql_name + \"Filter\"].fields:\n self.magql_types[self.magql_name + \"Filter\"].fields[\n field_name\n ] = MagqlInputField(RelFilter)\n self.magql_types[self.magql_name + \"Payload\"] = MagqlNonNull(\n MagqlObjectType(\n self.magql_name + \"Payload\",\n {\n \"errors\": MagqlField(MagqlList(\"String\")),\n \"result\": MagqlField(self.magql_name, None, ResultResolver()),\n },\n )\n )\n\n self.magql_types[self.magql_name + \"ListPayload\"] = MagqlNonNull(\n MagqlObjectType(\n self.magql_name + \"ListPayload\",\n {\n \"errors\": MagqlField(MagqlList(\"String\")),\n \"result\": MagqlField(\n MagqlList(self.magql_name), None, ResultResolver()\n ),\n \"count\": MagqlField(\"Int\", None, CountResolver()),\n },\n )\n )\n return None\n","sub_path":"src/magql/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":18331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"292964797","text":"# Analyse frames using the DDM.\n# Resynthesize using a modified McAulay and Quatieri method, taking into\n# consideration the estimated frequency slope.\n# Here we use a quartic polynomial of phase and amplitude\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sigmod as sm\nimport matplotlib.colors as colors\nimport neplot as nep\n\nshow_plots=False\n\n# Color contrast config\n# values further from 1, more contrast\nclr_gamma=4.\nclr_mapper=nep.PowerNormalize(clr_gamma)\n\nplotoutpath=os.environ['HOME']+'/Documents/development/masters_thesis/reports/plots/'\nplotoutpath+='mq_exp_mod_quintic'\n\nplt.rc('text',usetex=True)\nplt.rc('font',family='serif')\n\n# Hop size\nH=256\n# Analysis window / FFT size\nN=1024\n\n# Sample rate\nFs=16000.\n\n# Length of signal, seconds\nT_x=0.5\n# Length in samples\nM=int(np.floor(Fs*T_x))+N\n# sample indices\nm=np.arange(M)\n# initial phase\nphi_0=0.\n\n# Start pitch\npch_0=0\n# End pitch\npch_1=12.\n# Exponential coefficients\nb0=pch_0/12.\nb1=pch_1/12./T_x\nb=np.log(2.)*np.r_[b1,b0]\n# Frequency of pitch of no transposition (Hz)\nf0=440.\nt_x=m/Fs\nf_t=f0*np.exp(np.polyval(b,t_x))\n\n# Synthesize signal\narg_ph_x=2.*np.pi*f0/(np.log(2.)*b1)*np.exp(np.polyval(b,t_x))+phi_0\nwith open(plotoutpath+'_arg_ph_x.f64','w') as f:\n arg_ph_x.tofile(f)\narg_a_x=np.ones(M)\nwith open(plotoutpath+'_arg_a_x.f64','w') as f:\n arg_a_x.tofile(f)\nx=np.exp(1j*arg_ph_x)\n\n# Estimated parameters\nth=[]\n# Hop size\nH=128\n# Analysis window / FFT size\nN=512\n# Analysis window indices\nn=np.arange(N)\nW,dW=sm.w_dw_sum_cos(N,'c1-blackman-4')\n\n# Plot\nplt.figure(1)\nplt.specgram(x,NFFT=N,noverlap=(N-H),Fs=Fs,norm=clr_mapper,cmap=\"Greys\")\nplt.title('Original signal: (spectrogram)')\nplt.xlabel('Time (seconds)')\nplt.ylabel('Frequency (Hz)')\nplt.gca().set_xlim(0,(len(x)-N)/float(Fs))\nplt.gca().set_ylim(f_t.min()*0.5,f_t.max()*1.5)\nplt.savefig(plotoutpath+'_original_spec.eps')\n\nfor h in np.arange(0,M-N,H):\n x_=x[h:h+N]\n a_=sm.ddm_p2_1_3(x_,W,dW)\n # Store estimated parameters\n th.append(a_)\n\n# Polynomial error bounds\neb_c5=[]\neb_c4=[]\neb_c3=[]\neb_d5=[]\neb_d4=[]\neb_d3=[]\neb_ph=[]\neb_a=[]\n\n# Synthesize using modified McAulay & Quatieri quintic phase method\nh=0\ny=np.zeros(len(x)).astype('complex_')\n# Argument x of phase function exp(j*x)\narg_ph=np.zeros(len(x)).astype('double')\n# Argument x of amplitude function exp(j*x)\narg_a=np.zeros(len(x)).astype('double')\nfor i in xrange(len(th)-1):\n phi_i0=np.imag(th[i][0])\n phi_i1=np.imag(th[i+1][0])\n w_i0=np.imag(th[i][1])\n w_i1=np.imag(th[i+1][1])\n psi_i0=np.imag(th[i][2])\n psi_i1=np.imag(th[i+1][2])\n # Compute M*\n M=np.round((20.*H*(w_i0+w_i1)+(H**2.)*(psi_i0-psi_i1)+40.*(phi_i0-phi_i1))/(80.*np.pi))\n # Compute phase polynomial coefficients\n c5_p=np.r_[\n 6.*(phi_i1-phi_i0+2.*np.pi*M),\n -3.*(w_i1+w_i0),\n 0.5*(psi_i1-psi_i0),\n 0.,\n 0.,\n 0]\n c4_p=np.r_[\n 15.*(phi_i0-phi_i1-2.*np.pi*M),\n 7.*w_i1+8.*w_i0,\n 1.5*psi_i0-psi_i1,\n 0.,\n 0.]\n c3_p=np.r_[\n 10.*(phi_i1-phi_i0+2.*np.pi*M),\n -4.*w_i1-6.*w_i0,\n 0.5*psi_i1-1.5*psi_i0,\n 0.]\n c5,eb_c5_=sm.polyval_mu(c5_p,1./H)\n c4,eb_c4_=sm.polyval_mu(c4_p,1./H)\n c3,eb_c3_=sm.polyval_mu(c3_p,1./H)\n eb_c5.append(eb_c5_)\n eb_c4.append(eb_c4_)\n eb_c3.append(eb_c3_)\n c2=0.5*psi_i0\n c1=w_i0\n c0=phi_i0\n c=np.r_[c5,c4,c3,c2,c1,c0]\n # evaluate phase polynomial\n ph_,eb_ph_=sm.polyval_mu(c,np.arange(H))\n arg_ph[h:h+H]=ph_\n eb_ph += list(eb_ph_)\n y[h:h+H]=np.exp(1j*ph_)\n # compute amplitude 
polynomial coefficients\n a0_i0=np.real(th[i][0])\n a0_i1=np.real(th[i+1][0])\n a1_i0=np.real(th[i][1])\n a1_i1=np.real(th[i+1][1])\n a2_i0=np.real(th[i][2])\n a2_i1=np.real(th[i+1][2])\n # Find coefficients of quintic amplitude function:\n # mu(t) = d5*t^5 + d4*t^4 + d3*t^3 + d2*t^2 + d1*t + d0\n # at t=0, mu(0)= a0_i0 so\n d0=a0_i0\n d5_p=np.r_[\n 6.*(a0_i1-a0_i0),\n -3.*(a1_i1+a1_i0),\n 0.5*(a2_i1-a2_i0),\n 0.,\n 0.,\n 0.]\n d4_p=np.r_[\n 15.*(a0_i0-a0_i1),\n 7.*a1_i1+8.*a1_i0,\n 1.5*a2_i0-a2_i1,\n 0.,\n 0.]\n d3_p=np.r_[\n 10.*(a0_i1-a0_i0),\n -4.*a1_i1-6.*a1_i0,\n 0.5*a2_i1-1.5*a2_i0,\n 0.]\n d5,eb_d5_=sm.polyval_mu(d5_p,1./H)\n d4,eb_d4_=sm.polyval_mu(d4_p,1./H)\n d3,eb_d3_=sm.polyval_mu(d3_p,1./H)\n eb_d5.append(eb_d5_)\n eb_d4.append(eb_d4_)\n eb_d3.append(eb_d3_)\n d5=np.polyval(d5_p,1./H)\n d4=np.polyval(d4_p,1./H)\n d3=np.polyval(d3_p,1./H)\n d2=0.5*a2_i0\n d1=a1_i0\n d0=a0_i0\n d=np.r_[d5,d4,d3,d2,d1,d0]\n # Multiply by amplitude function\n arg_a[h:h+H],eb_a_=sm.polyval_mu(d,np.arange(H))\n y[h:h+H]*=np.exp(arg_a[h:h+H])\n eb_a += list(eb_a_)\n h+=H\n\n# Save phase and log-amplitude polynomials\nwith open(plotoutpath+'_arg_ph.f64','w') as f:\n arg_ph.tofile(f)\nwith open(plotoutpath+'_arg_a.f64','w') as f:\n arg_a.tofile(f)\n\n# Save true and estimated signals\nwith open(plotoutpath+'_true_x.dat','w') as f:\n x.tofile(f)\nwith open(plotoutpath+'_est_x.dat','w') as f:\n y.tofile(f)\n\n\nplt.figure(2)\nplt.specgram(y,NFFT=N,noverlap=(N-H),Fs=Fs,norm=clr_mapper,cmap=\"Greys\")\nplt.title('Estimated signal (spectrogram)')\nplt.xlabel('Time (seconds)')\nplt.ylabel('Frequency (Hz)')\nplt.gca().set_xlim(0,(h-N)/float(Fs))\nplt.gca().set_ylim(f_t.min()*0.5,f_t.max()*1.5)\nplt.savefig(plotoutpath+'_estimated_spec.eps')\n\nplt.figure(3)\n# Plot length\nN_plt_0=2000\nN_plt_1=3000\nplt.plot(m/float(Fs),np.real(x),c='k',label='True')\nplt.plot(m/float(Fs),np.real(y),c='Gray',label='Estimated')\nplt.gca().set_xlim(N_plt_0/float(Fs),N_plt_1/float(Fs))\nplt.title('True vs. Estimated signal (real part)')\nplt.ylabel('Amplitude')\nplt.xlabel('Time (seconds)')\nplt.legend()\nplt.savefig(plotoutpath+'_orig_vs_est.eps')\nplt.figure(4)\nplt.plot(m/float(Fs),20.*np.log10(np.abs(y-x)),c='k')\nplt.gca().set_xlim(0,(h-N)/float(Fs))\nplt.title('Error signal (db Error)')\nplt.ylabel('Amplitude (dB power)')\nplt.xlabel('Time (seconds)')\nplt.savefig(plotoutpath+'_error.eps')\nplt.figure(5)\ntmp=np.array([np.log(eb_ph_)/np.log(10.) for eb_ph_ in\n eb_ph])\nma_,mai_=sm.lextrem(tmp,comp='max')\nplt.plot(np.arange(len(eb_ph))[mai_],tmp[mai_],label=\"Phase\",c='k',ls='-')\ntmp=np.array([np.log(eb_a_)/np.log(10.) for eb_a_ in eb_a])\nma_,mai_=sm.lextrem(tmp,comp='max')\nplt.plot(np.arange(len(eb_a))[mai_],tmp[mai_],\n label=\"Amplitude\",c='k',ls=':')\nplt.xlabel('Sample number')\nplt.ylabel('Absolute error bound ($\\log_{10}$)')\nplt.title('Polynomial evaluation error bound')\nplt.legend(loc='best')\nplt.savefig(plotoutpath+'_poly_eval_err.eps')\nplt.figure(6)\nplt.plot(np.arange(len(eb_c5)),np.log(np.array([eb_c5,eb_c4,eb_c3,eb_d5,eb_d4,eb_d3]).T)/np.log(10.))\nplt.xlabel('Frame number')\nplt.ylabel('Absolute error bound')\nplt.title('Polynomial evaluation error bound')\n\nif (show_plots):\n plt.show()\n","sub_path":"test/mq_exp_test_3.py","file_name":"mq_exp_test_3.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"540169132","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport re\nfrom collections import Counter, OrderedDict\nfrom datetime import date\nfrom hashlib import sha1\nfrom itertools import chain, product\nfrom operator import attrgetter, itemgetter\n\nfrom dateutil.parser import parse\n\nfrom django.conf import settings\nfrom django.contrib.postgres.fields import JSONField\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.db import models\nfrom django.db.models import FieldDoesNotExist\nfrom django.template import loader\nfrom django.template.defaultfilters import date as convert_date, escape\nfrom django.utils.encoding import force_str\nfrom django.utils.functional import cached_property\n\nfrom limpyd import model as lmodel, fields as lfields\nfrom limpyd.contrib.collection import ExtendedCollectionManager\n\nfrom gim.core import models as core_models, get_main_limpyd_database, thread_data\nfrom gim.core.models.base import GithubObject\nfrom gim.core.diffutils import get_encoded_hunk_shas, split_hunks, split_patch_into_hunks\nfrom gim.core.utils import cached_method, graph_from_edges, dfs_topsort_traversal, stop_after_seconds\nfrom gim.events.models import EventPart, Event\nfrom gim.front.utils import text_to_markdown\nfrom gim.subscriptions import models as subscriptions_models\nfrom gim.ws import publisher\n\n\nSPONSORS_EXTRACT_START_DATE = date(2017, 1, 1)\n\n\ndef html_content(self, body_field='body', force=False):\n html = None\n if not force:\n html = getattr(self, '%s_html' % body_field, None)\n if html is None:\n html = text_to_markdown(getattr(self, body_field))\n return html\n\n\nclass FrontEditable(models.Model):\n\n front_uuid = models.CharField(max_length=36, blank=True, null=True)\n\n class Meta:\n abstract = True\n\n def defaults_create_values(self, mode):\n values = self.old_defaults_create_values(mode)\n values.setdefault('simple', {})['front_uuid'] = self.front_uuid\n if hasattr(self, 'is_new'):\n values['simple']['is_new'] = self.is_new\n return values\n\n def clear_front_uuid(self):\n # We don't call save as we are already in a save call and don't want things to be called twice\n self.__class__.objects.filter(pk=self.pk).update(front_uuid=None)\n\n @staticmethod\n def isinstance(obj):\n try:\n obj._meta.get_field('front_uuid')\n except FieldDoesNotExist:\n return False\n else:\n return True\n\n\nclass Hashable(object):\n\n @property\n def hash(self):\n return sha1(force_str(self.hash_values)).hexdigest()\n\n @property\n def hash_values(self):\n\n raise NotImplementedError()\n\n def hash_changed(self, force_update=False):\n \"\"\"\n Tells if the current hash is different of the saved one\n \"\"\"\n hash_obj, hash_obj_created = H.get_or_connect_for_obj(self)\n\n self.previous_hash = hash_obj.get_for_obj(self)\n\n hash = self.hash\n\n if not force_update and not hash_obj_created and str(hash) == self.previous_hash:\n return False\n\n # save the new hash\n hash_obj.set_for_obj(self, hash, is_hex=True)\n\n return hash\n\n\nclass _GithubUser(Hashable, models.Model):\n AVATAR_STARTS = [\n # 0.gravatar.com => gravatar.com\n (re.compile('^(https?://)\\d+\\.'), r'\\1'),\n # avatars0.githubusercontent.com => avatars.githubusercontent.com\n (re.compile('^(https?://[^\\.]+)\\d+\\.'), r'\\1.'),\n ]\n\n class Meta:\n abstract = True\n\n @classmethod\n def get_default_avatar(cls):\n if not hasattr(core_models.GithubUser, '_default_avatar'):\n core_models.GithubUser._default_avatar 
= staticfiles_storage.url('front/img/default-avatar.png')\n return core_models.GithubUser._default_avatar\n\n @cached_property\n def full_avatar_url(self):\n if self.avatar_url:\n return '%s%s' % (settings.AVATARS_PREFIX, self.avatar_url)\n return core_models.GithubUser.get_default_avatar()\n\n @property\n def hash_values(self):\n \"\"\"\n Hash for this object representing its state at the current time, used to\n know if we have to reset an issue's cache\n \"\"\"\n\n avatar_url = ''\n\n if self.avatar_url:\n avatar_url = self.avatar_url\n\n # if we have a number at the end of the subdomain, we remove it because it may\n # change between requests to the github api for the same user with the save avatar\n for regex, repl in self.AVATAR_STARTS:\n if regex.match(avatar_url):\n avatar_url = regex.sub(repl, avatar_url, count=1)\n break\n\n return self.username, avatar_url\n\n def get_related_issues(self):\n \"\"\"\n Return a list of all issues related to this user (it may be the creator,\n an assignee, a user requested for review, or the closer)\n \"\"\"\n return core_models.Issue.objects.filter(\n models.Q(user=self)\n | models.Q(assignees=self)\n | models.Q(requested_reviewers=self)\n | models.Q(closed_by=self)\n )\n\n @cached_property\n def readable_subscribed_repositories(self):\n \"\"\"\n Return a dict with, for each available repository for the user, the\n repository fullname as key and the \"Subscription\" object as value\n \"\"\"\n from gim.subscriptions.models import SUBSCRIPTION_STATES\n\n ids = self.subscriptions.filter(\n state__in=SUBSCRIPTION_STATES.READ_RIGHTS).values_list('repository_id', flat=True)\n\n return core_models.Repository.objects.filter(id__in=ids).extra(select={\n 'lower_name': 'lower(name)',\n }\n ).select_related('owner').order_by('owner__username_lower', 'lower_name')\n\n def is_admin_or_org_name(self, name):\n try:\n return bool(self.get_org_by_name(name, True))\n except:\n return False\n\n def get_org_by_name(self, name, admin_only=False):\n name = name.lower()\n orgnizations = self.cached_admin_organizations if admin_only else self.cached_organizations\n return [org for org in orgnizations if org.username_lower == name][0]\n\n def get_orgs_subscriptions(self):\n \"\"\"Get a dict with every valid subscriptions for every user sub scriptions\n\n Keys are the organisations, values are a set of repositories for each organization.\n\n Are only included normal organization where the user is active, or where the user is\n an outside-collaborator.\n And only valid subscriptions are returned.\n\n \"\"\"\n\n if self.is_organization:\n return {}\n\n if not hasattr(self, '_orgs_subscriptions'):\n self._orgs_subscriptions = {\n membership.organization: {\n subscription.repository\n for subscription\n in membership.organization.subscriptions.all()\n }\n for membership\n in self.memberships_as_user.filter(\n models.Q(role='outside-collaborator') | models.Q(state='active')\n ).only(\n 'user', 'organization' # don't really why `user` is needed because not used\n ).select_related(\n 'organization'\n ).prefetch_related(\n models.Prefetch(\n 'organization__subscriptions',\n queryset=subscriptions_models.Subscription.objects.exclude(\n state=subscriptions_models.Subscription.STATES.NORIGHTS\n ).only(\n 'user', 'repository'\n ).select_related(\n 'repository__owner'\n )\n )\n )\n if membership.organization.subscriptions.all()\n }\n return self._orgs_subscriptions\n\nclass _Repository(Hashable, models.Model):\n class Meta:\n abstract = True\n\n def get_reverse_kwargs(self):\n \"\"\"\n 
Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.owner.username,\n 'repository_name': self.name,\n }\n\n def get_absolute_url(self):\n return self.get_view_url('home')\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_create_issue_url(self):\n return self.get_view_url('issue.create')\n\n def get_create_project_url(self):\n from gim.front.repository.board.views import ProjectCreateView\n return self.get_view_url(ProjectCreateView.url_name)\n\n def get_multiselect_base_url(self):\n from gim.front.repository.issues.multiselect.views import MultiSelectListLabelsView\n url = reverse('front:repository:multiselect:%s' % MultiSelectListLabelsView.url_name, kwargs = self.get_reverse_kwargs())\n base_url = url[:-12]\n assert base_url.endswith('/multiselect/')\n return base_url\n\n def delete(self, *args, **kwargs):\n pk = self.pk\n\n # When deleting a repository we don't publish when things (comments...) are deleted\n thread_data.skip_publish = True\n try:\n self.old_delete(*args, **kwargs)\n finally:\n thread_data.skip_publish = False\n\n publisher.remove_repository(pk)\n\n def get_milestones_for_select(self, key='id', with_graph_url=False, include_grouped=True,\n milestones=None):\n\n if milestones is None:\n milestones = self.milestones.all()\n\n data = {getattr(m, key): {\n 'id': m.id,\n 'number': m.number,\n 'due_on': convert_date(m.due_on, settings.DATE_FORMAT) if m.due_on else None,\n 'title': escape(m.title),\n 'state': m.state,\n 'graph_url': str(m.get_graph_url()) if with_graph_url else None,\n }\n for m in milestones\n }\n\n result = {\n 'milestones_json': json.dumps(data),\n }\n\n if include_grouped:\n\n grouped_milestones = {}\n for milestone in milestones:\n grouped_milestones.setdefault(milestone.state, []).append(milestone)\n\n result['grouped_milestones'] = grouped_milestones\n\n return result\n\n def all_metrics(self):\n return self.label_types.filter(is_metric=True)\n\n @cached_property\n def project_columns(self):\n return core_models.Column.objects.filter(project__repository=self)\n\n def get_sponsors_info(self, for_user):\n \"\"\"Get the sponsors info to be displayed\n\n We'll display only one sponsor, the same for the whole day, and a short sentence\n with the number of other sponsors. Hovering on this sentence will display the list\n of the other sponsors, sorted alphabetically.\n If the user passed via `for_user` is a sponsor, it will be the main sponsor.\n If he belongs to the only sponsoring organization, this organization will be displayed.\n If he belongs to many sponsoring organizations, the same algorithm (the same for the whole\n day) will be used.\n\n Parameters\n ----------\n for_user: GithubUser\n The user for whom to display the sponsors for this repository\n\n Returns\n -------\n tuple(2)\n A tuple with two entries:\n 0. One sponsor to display\n 2. 
The list of other sponsors\n\n \"\"\"\n\n sponsors = sorted(\n self.sponsors.all(),\n key=lambda sponsor: (sponsor.full_name or sponsor.username).lower()\n )\n\n if not sponsors:\n return None, []\n\n if len(sponsors) == 1:\n return sponsors[0], []\n\n sponsoring_organizations = set(sponsor for sponsor in sponsors if sponsor.is_organization)\n\n main_sponsor = None\n possible_main_sponsors = sponsors\n\n if for_user in sponsors:\n main_sponsor = for_user\n elif sponsoring_organizations:\n organizations = sponsoring_organizations.intersection(for_user.cached_organizations)\n if len(organizations) == 1:\n main_sponsor = organizations.pop()\n elif len(organizations):\n possible_main_sponsors = organizations\n\n if not main_sponsor:\n main_sponsor = self.extract_main_sponsor(possible_main_sponsors)\n\n other_sponsors = [sponsor for sponsor in sponsors if sponsor != main_sponsor]\n\n return main_sponsor, other_sponsors\n\n def get_offered_info(self, for_user, current_org=None):\n \"\"\"Get the orgs offerring this repository\n\n See get_sponsors_info for the logic\n\n \"\"\"\n\n orgs = [\n org\n for org, repos in for_user.get_orgs_subscriptions().items()\n if self in repos\n ]\n\n if not orgs:\n return None, []\n\n if len(orgs) == 1:\n return orgs[0], []\n\n if current_org and current_org in orgs:\n main_org = current_org\n else:\n main_org = self.extract_main_sponsor(orgs)\n\n other_orgs = [org for org in orgs if org != main_org]\n\n return main_org, other_orgs\n\n\n @staticmethod\n def extract_main_sponsor(sponsors):\n \"\"\"Extract a sponsor from the list\n\n The extraction is done in a way than the same sponsor will be\n displayed for the whole day, but will vary from day to day,\n with each sponsor having the exact same chance to be displayed.\n\n Parameters\n ----------\n sponsors: iterable(GithubUser)\n The list of sponsors from which to extract the main one.\n This list is assumed to be sorted!\n\n Returns\n -------\n GithubUser\n The extracted sponsor\n\n \"\"\"\n\n return sponsors[\n (date.today() - SPONSORS_EXTRACT_START_DATE).days % len(sponsors)\n ]\n\n @property\n def hash_values(self):\n return self.full_name\n\nclass _LabelType(Hashable, models.Model):\n class Meta:\n abstract = True\n\n def get_reverse_kwargs(self):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'label_type_id': self.id\n }\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_edit_url(self):\n from gim.front.repository.dashboard.views import LabelTypeEdit\n return self.get_view_url(LabelTypeEdit.url_name)\n\n def get_delete_url(self):\n from gim.front.repository.dashboard.views import LabelTypeDelete\n return self.get_view_url(LabelTypeDelete.url_name)\n\n @property\n def hash_values(self):\n \"\"\"\n Hash for this object representing its state at the current time, used to\n know if we have to reset an issue's cache\n \"\"\"\n return self.id, self.name\n\n def get_related_issues(self):\n \"\"\"\n Return a list of all issues related to this label type\n \"\"\"\n return core_models.Issue.objects.filter(labels__label_type=self)\n\n\nclass _Label(Hashable, FrontEditable):\n class Meta:\n abstract = True\n\n @property\n def hash_values(self):\n \"\"\"\n Hash for this object representing its state at the current time, used to\n know if we have to reset an issue's cache\n \"\"\"\n return self.id, self.name, 
(self.color or ''), (self.label_type.hash if self.label_type_id else '')\n\n def get_related_issues(self):\n \"\"\"\n Return a list of all issues related to this label\n \"\"\"\n return core_models.Issue.objects.filter(labels=self)\n\n\nclass _Milestone(Hashable, FrontEditable):\n class Meta:\n abstract = True\n\n @property\n def hash_values(self):\n \"\"\"\n Hash for this object representing its state at the current time, used to\n know if we have to reset an issue's cache\n \"\"\"\n return self.id, self.title, self.state\n\n def get_related_issues(self):\n \"\"\"\n Return a list of all issues related to this milestone\n \"\"\"\n return core_models.Issue.objects.filter(milestone=self)\n\n @property\n def html_content(self):\n return html_content(self, 'description')\n\n @property\n def short_title(self):\n if len(self.title) > 25:\n result = self.title[:20] + u'…'\n else:\n result = self.title\n return escape(result)\n\n def get_reverse_kwargs(self, key=\"id\"):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'milestone_%s' % key: getattr(self, key),\n }\n\n def get_view_url(self, url_name, key=\"id\"):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs(key=key))\n\n def get_edit_url(self):\n from gim.front.repository.dashboard.views import MilestoneEdit\n return self.get_view_url(MilestoneEdit.url_name)\n\n def get_delete_url(self):\n from gim.front.repository.dashboard.views import MilestoneDelete\n return self.get_view_url(MilestoneDelete.url_name)\n\n def get_graph_url(self):\n from gim.front.repository.dashboard.views import MilestoneGraph\n return self.get_view_url(MilestoneGraph.url_name, key='number')\n\n\nclass WithFiles(object):\n\n def files_enhanced_for_user(self, user, files):\n counts = self.comments_count_by_path\n\n for file in list(files):\n\n split_lines = file.get_split_lines_for_user(user)\n if split_lines:\n original_hunks = split_patch_into_hunks(file.patch)\n hunks = split_hunks(original_hunks, split_lines)\n if len(hunks) != len(original_hunks):\n file.patch = '\\n'.join(chain.from_iterable(hunks))\n file.hunk_shas = get_encoded_hunk_shas(hunks)\n file.hunks = hunks\n\n file.repository = self.repository\n file.nb_comments = counts.get(file.path, 0)\n file.reviewed_hunks_locally = file.get_hunks_locally_reviewed_by_user(user)\n file.reviewed_locally = all(file.reviewed_hunks_locally.values())\n\n return files\n\n\nclass _Issue(WithFiles, Hashable, FrontEditable):\n\n pr_grouped_commits = JSONField(blank=True, null=True)\n\n class Meta:\n abstract = True\n\n RENDERER_IGNORE_FIELDS = {'state', 'merged'}\n PR_GROUPED_COMMITS_VERSION = 1\n\n def get_reverse_kwargs(self):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'issue_number': self.number\n }\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_absolute_url(self):\n return self.get_view_url('issue')\n\n def get_websocket_data_url(self):\n return self.get_view_url('issue.summary')\n\n def get_preview_url(self):\n return self.get_view_url('issue.preview')\n\n def get_created_url(self):\n kwargs = self.get_reverse_kwargs()\n del kwargs['issue_number']\n kwargs['issue_pk'] = self.pk\n return reverse_lazy('front:repository:issue.created', kwargs=kwargs)\n\n 
def edit_field_url(self, field):\n return self.get_view_url('issue.edit.%s' % field)\n\n def issue_comment_create_url(self):\n from gim.front.repository.issues.views import IssueCommentCreateView\n return self.get_view_url(IssueCommentCreateView.url_name)\n\n def pr_comment_create_url(self):\n if not hasattr(self, '_pr_comment_create_url'):\n from gim.front.repository.issues.views import PullRequestCommentCreateView\n self._pr_comment_create_url = self.get_view_url(PullRequestCommentCreateView.url_name)\n return self._pr_comment_create_url\n\n def pr_review_create_url(self):\n if not hasattr(self, '_pr_review_create_url'):\n from gim.front.repository.issues.views import PullRequestReviewCreateView\n self._pr_review_create_url = self.get_view_url(PullRequestReviewCreateView.url_name)\n return self._pr_review_create_url\n\n def ajax_files_url(self):\n return self.get_view_url('issue.files')\n\n def ajax_commits_url(self):\n return self.get_view_url('issue.commits')\n\n def ajax_review_url(self):\n return self.get_view_url('issue.review')\n\n def ajax_commit_base_url(self):\n if not hasattr(self, '_ajax_commit_base_url'):\n kwargs = self.get_reverse_kwargs()\n kwargs['commit_sha'] = '0' * 40\n from gim.front.repository.issues.views import CommitAjaxIssueView\n self._ajax_commit_base_url = reverse_lazy('front:repository:%s' % CommitAjaxIssueView.url_name,\n kwargs=kwargs)\n return self._ajax_commit_base_url\n\n def ajax_commit_compare_base_url(self):\n if not hasattr(self, '_ajax_commit_compare_base_url'):\n kwargs = self.get_reverse_kwargs()\n kwargs['commit_sha'] = '0' * 40\n kwargs['other_commit_sha'] = '1' * 40\n from gim.front.repository.issues.views import CommitAjaxCompareView\n self._ajax_commit_compare_base_url = reverse_lazy('front:repository:%s' % CommitAjaxCompareView.url_name,\n kwargs=kwargs)\n return self._ajax_commit_compare_base_url\n\n def commit_comment_create_url(self):\n if not hasattr(self, '_commit_comment_create_url'):\n kwargs = self.get_reverse_kwargs()\n kwargs['commit_sha'] = '0' * 40\n from gim.front.repository.issues.views import CommitCommentCreateView\n self._commit_comment_create_url = reverse_lazy('front:repository:%s' % CommitCommentCreateView.url_name,\n kwargs=kwargs)\n return self._commit_comment_create_url\n\n def ajax_branch_deletion_url(self):\n from gim.front.repository.issues.views import IssueDeletePRBranch\n return self.get_view_url(IssueDeletePRBranch.url_name)\n\n @property\n def type(self):\n return 'pull request' if self.is_pull_request else 'issue'\n\n @property\n def nb_authors(self):\n if not self.is_pull_request or not self.nb_commits:\n return 0\n if self.nb_commits == 1:\n return 1\n return len(set(self.commits.filter(related_commits__deleted=False)\n .values_list('author_name', flat=True)))\n\n @property\n def hash_values(self):\n \"\"\"\n Hash for this issue representing its state at the current time, used to\n know if we have to reset its cache\n \"\"\"\n\n hashable_fields = ('title', 'body', 'state', 'number', 'is_pull_request', 'updated_at', 'user_id', 'closed_by_id', 'milestone_id', 'total_comments_count')\n\n if self.is_pull_request:\n hashable_fields += ('base_sha', 'head_sha', 'pr_review_state', 'merged', 'last_head_status', 'pr_review_required', 'displayable_pr_reviews_count')\n if self.state == 'open' and not self.merged:\n hashable_fields += ('mergeable_state', 'mergeable', )\n\n hashable_lists = [\n self.assignees.values_list('pk', flat=True),\n self.requested_reviewers.values_list('pk', flat=True),\n 
self.labels.values_list('pk', flat=True),\n ['%s:%s' % c for c in self.cards.values_list('column_id', 'position')],\n\n ]\n\n if self.is_pull_request:\n hashable_lists += [\n self.related_commits.filter(deleted=False).values_list('commit__sha', flat=True),\n ]\n\n return tuple(\n getattr(self, field, None) for field in hashable_fields\n ) + tuple(chain.from_iterable(\n sorted(hashable_list) for hashable_list in hashable_lists\n )) + (self.repository.hash, )\n\n def update_saved_hash(self):\n \"\"\"\n Update in redis the saved hash\n \"\"\"\n hash_obj, _ = H.get_or_connect_for_obj(self)\n hash_obj.set_for_obj(self, self.hash, is_hex = True)\n\n @property\n def saved_hash(self):\n \"\"\"\n Return the saved hash, create it if not exist\n \"\"\"\n hash_obj, created = H.get_or_connect_for_obj(self)\n if created:\n self.update_saved_hash()\n return hash_obj.get_for_obj(self)\n\n def update_cached_template(self, force_regenerate=False):\n \"\"\"\n Update, if needed, the cached template for the current issue.\n \"\"\"\n template = 'front/repository/issues/include_issue_item_for_cache.html'\n\n # minimize queries\n issue = self.__class__.objects.filter(\n id=self.id\n ).select_related(\n 'repository__owner', 'user', 'closed_by', 'milestone'\n ).prefetch_related(\n 'assignees', 'labels__label_type', 'cards__column__project'\n )[0]\n\n context = {\n 'issue': issue,\n '__regenerate__': force_regenerate,\n }\n\n loader.get_template(template).render(context)\n\n @cached_method\n def all_commits(self, include_deleted, sort=True, only_ready=True):\n qs = self.related_commits.select_related(\n 'commit__author', 'commit__committer', 'commit__repository__owner'\n )\n\n if sort:\n qs = qs.order_by(\n 'commit__authored_at', 'commit__committed_at'\n )\n\n filters = {}\n\n if not include_deleted:\n filters['deleted'] = False\n\n if only_ready:\n filters['commit__authored_at__isnull'] = False\n\n if filters:\n qs = qs.filter(**filters)\n\n result = []\n for ic in qs:\n ic.commit.relation_deleted = ic.deleted\n ic.commit.pull_request_head_at = ic.pull_request_head_at\n result.append(ic.commit)\n\n return result\n\n def save_regrouped_commits(self, groups):\n self.pr_grouped_commits = {\n 'version': self.PR_GROUPED_COMMITS_VERSION,\n 'head_sha': self.head_sha,\n 'groups': [\n {\n 'head_sha': group['head_sha'],\n 'outdated': group['outdated'],\n 'head_at': str(group['head_at']),\n 'commits_shas': [c.sha for c in group['commits']],\n }\n for group in groups\n ]\n }\n\n self.save(update_fields=['pr_grouped_commits'])\n\n def get_regrouped_commits(self):\n\n try:\n stored = self.pr_grouped_commits\n if stored and stored['head_sha'] == self.head_sha and stored['version'] == self.PR_GROUPED_COMMITS_VERSION:\n commits = self.all_commits(True, False, False)\n by_sha = {c.sha: c for c in commits}\n return [\n {\n 'head_sha': group['head_sha'],\n 'outdated': group['outdated'],\n 'head_at': parse(group['head_at']),\n 'nb_commits': len(group['commits_shas']),\n 'commits_by_day': GroupedCommits.group_by_day([by_sha[sha] for sha in group['commits_shas']], include_without_dates=True),\n }\n for group in stored['groups']\n ]\n except Exception:\n # we'll recompute if we couldn't use stored data\n pass\n\n if self.commits_parents_fetched:\n with stop_after_seconds(2):\n try:\n groups = self.regroup_commits()\n except Exception:\n pass\n else:\n if groups and (len(groups) > 1 or not self.nb_deleted_commits):\n self.save_regrouped_commits(groups)\n for group in groups:\n group['commits_by_day'] = 
GroupedCommits.group_by_day(group.pop('commits'), include_without_dates=True)\n return groups\n\n # we had a problem, create two groups: deleted and not deleted\n groups = []\n if self.nb_deleted_commits:\n deleted_commits = [c for c in self.all_commits(True, True, True) if c.relation_deleted]\n if deleted_commits:\n head_commit = deleted_commits[-1]\n groups.append({\n 'head_sha': head_commit.sha,\n 'outdated': True,\n 'head_at': head_commit.pull_request_head_at or head_commit.committed_at,\n 'nb_commits': len(deleted_commits),\n 'commits_by_day': GroupedCommits.group_by_day(deleted_commits, include_without_dates=True),\n })\n\n non_deleted_commits = self.all_commits(False, True, True)\n if non_deleted_commits:\n head_commit = non_deleted_commits[-1]\n groups.append({\n 'head_sha': head_commit.sha,\n 'outdated': False,\n 'head_at': head_commit.pull_request_head_at or head_commit.committed_at,\n 'nb_commits': len(non_deleted_commits),\n 'commits_by_day': GroupedCommits.group_by_day(non_deleted_commits, include_without_dates=True),\n })\n\n if self.commits_parents_fetched:\n self.save_regrouped_commits(groups)\n return groups\n\n def regroup_commits(self):\n\n commits = self.all_commits(True, False, False)\n\n by_sha = {c.sha: c for c in commits}\n\n edges = list(chain.from_iterable([\n product(\n [c.sha],\n c.parents or []\n )\n for c in commits\n ]))\n\n graph = graph_from_edges(edges)\n\n # get all base commits\n bases = [sha for sha, parents in graph.items() if not parents]\n\n # remove them from the graph\n graph = {sha: [parent for parent in parents if parent not in bases] for sha, parents in graph.items() if sha not in bases}\n\n # get all head commits\n head_shas = set(graph.keys()) ^ set(chain.from_iterable(graph.values()))\n\n # make one group by head\n groups = []\n for head_sha in head_shas:\n head_commit = by_sha[head_sha]\n commits = [by_sha[sha] for sha in (dfs_topsort_traversal(graph, head_sha))]\n groups.append({\n 'head_sha': head_sha,\n 'outdated': head_commit.relation_deleted,\n 'head_at': head_commit.pull_request_head_at or head_commit.committed_at,\n 'nb_commits': len(commits),\n 'commits': commits,\n })\n\n groups.sort(key=itemgetter('head_at'))\n\n return groups\n\n def get_diffable_commits(self):\n if not self.nb_deleted_commits:\n return {}\n\n all_commits = self.all_commits(True, False, False)\n\n by_authored_at = {}\n for commit in all_commits:\n by_authored_at.setdefault(commit.authored_at, []).append(commit)\n\n result = {}\n for authored_at, commits in by_authored_at.items():\n unique_commits = set(commits)\n if len(unique_commits) < 2:\n continue\n result[authored_at] = []\n for commit in unique_commits:\n for other_commit in unique_commits:\n if commit.sha == other_commit.sha:\n continue\n if commit.committed_at < other_commit.committed_at:\n ordered = [commit, other_commit]\n else:\n ordered = [other_commit, commit]\n result[authored_at].append({\n 'commit': commit,\n 'other_commit': other_commit,\n 'ordered_commits': ordered,\n })\n\n return result\n\n @property\n def all_entry_points(self):\n if not hasattr(self, '_all_entry_points'):\n self._all_entry_points = list(\n self.pr_comments_entry_points.annotate(\n nb_comments=models.Count('comments')\n ).filter(\n nb_comments__gt=0\n ).select_related(\n 'user', 'repository__owner'\n ).prefetch_related(\n 'comments__user'\n )\n )\n return self._all_entry_points\n\n @property\n def all_commit_entry_points(self):\n if not hasattr(self, '_all_commit_entry_points'):\n\n commits = self.all_commits(True, False, 
True) # only args for cache_method\n commits_by_pk = {commit.pk: commit for commit in commits}\n\n self._all_commit_entry_points = list(\n core_models.CommitCommentEntryPoint.objects.filter(\n commit__id__in=commits_by_pk.keys()\n ).annotate(\n nb_comments=models.Count('comments')\n ).filter(\n nb_comments__gt=0\n ).select_related(\n 'user', 'repository__owner',\n ).prefetch_related(\n 'comments__user'\n )\n )\n\n # cache commit for each entry point, using the ones got from\n # `all_commits`, that include the `relation_deleted` attribute\n for entry_point in self._all_commit_entry_points:\n entry_point._commit_cache = commits_by_pk[entry_point.commit_id]\n\n return self._all_commit_entry_points\n\n def get_activity(self):\n \"\"\"\n Return the activity of the issue, including comments, events and\n pr_comments if it's a pull request\n \"\"\"\n change_events = list(self.event_set.filter(id__in=set(\n EventPart.objects.filter(\n event__issue_id=self.id,\n )\n .exclude(\n event__type=Event.EVENT_TYPE_CHOICES.CREATE,\n event__related_content_type__model='issue'\n )\n .exclude(\n EventPart.get_exclude_ignore_fields()\n )\n .values_list('event_id', flat=True)\n )).prefetch_related('parts'))\n\n comments = list(self.comments.select_related('user', 'repository__owner'))\n\n events = list(self.events.exclude(event='referenced', commit_sha__isnull=True)\n .select_related('user', 'repository__owner'))\n\n activity = change_events + comments + events\n\n if self.is_pull_request:\n pr_comments = list(self.pr_comments.select_related('user'))\n\n activity += pr_comments + self.all_commits(False, True, True) # only args for cache_method\n\n # group commit comments by day + commit\n cc_by_commit = {}\n commit_comments = list(core_models.CommitComment.objects\n .filter(commit__related_commits__issue=self)\n .select_related('commit', 'user'))\n\n if len(commit_comments):\n all_commits_by_sha = {c.sha: c for c in self.all_commits(True, True, True)} # only args for cache_method\n for c in commit_comments:\n\n if c.commit.sha in all_commits_by_sha:\n c.commit.relation_deleted = all_commits_by_sha[c.commit.sha].relation_deleted\n\n cc_by_commit.setdefault(c.commit, []).append(c)\n\n for comments in cc_by_commit.values():\n activity += GroupedCommitComments.group_by_day(comments)\n\n # add pull request reviews\n activity += self.get_pr_reviews_activity()\n\n activity.sort(key=attrgetter('created_at'))\n\n if self.is_pull_request:\n activity = GroupedCommits.group_in_activity(activity)\n activity = GroupedPullRequestComments.group_in_activity(activity)\n\n return activity\n\n def get_pr_reviews_activity(self):\n if not hasattr(self, '_pr_reviews_activity'):\n self._pr_reviews_activity = list(self.reviews.filter(displayable=True).select_related('author'))\n return self._pr_reviews_activity\n\n @property\n def displayable_pr_reviews_count(self):\n return len(self.get_pr_reviews_activity())\n\n def get_sorted_entry_points(self):\n for entry_point in self.all_entry_points:\n entry_point.last_created = list(entry_point.comments.all())[-1].created_at\n return sorted(self.all_entry_points, key=attrgetter('last_created'))\n\n def get_sorted_entry_points_including_commits(self):\n if not hasattr(self, '_sorted_entry_points_including_commits'):\n for entry_points in [self.all_entry_points, self.all_commit_entry_points]:\n for entry_point in entry_points:\n entry_point.last_created = list(entry_point.comments.all())[-1].created_at\n self._sorted_entry_points_including_commits = sorted(self.all_entry_points + 
self.all_commit_entry_points, key=attrgetter('last_created'))\n return self._sorted_entry_points_including_commits\n\n def get_total_review_comments_including_comments(self):\n return sum(entry_point.nb_comments for entry_point in self.get_sorted_entry_points_including_commits())\n\n def get_commits_per_day(self, include_deleted=False):\n if not self.is_pull_request:\n return []\n return GroupedCommits.group_by_day(\n self.all_commits(include_deleted, True, True) # only args for cache_method\n )\n\n def get_all_commits_per_day(self):\n return self.get_commits_per_day(True)\n\n @cached_property\n def nb_deleted_commits(self):\n return self.related_commits.filter(deleted=True).count()\n\n @cached_property\n def nb_comments_in_deleted_commits_comments(self):\n return core_models.CommitComment.objects.filter(\n commit__issues=self,\n commit__related_commits__deleted=True\n ).count()\n\n @property\n def html_content(self):\n return html_content(self)\n\n @cached_property\n def comments_count_by_path(self):\n return Counter(\n self.pr_comments.filter(\n entry_point__position__isnull=False\n ).select_related(\n 'entry_point'\n ).values_list(\n 'entry_point__path', flat=True\n )\n )\n\n def publish_notifications(self):\n for notification in self.github_notifications.select_related('user').all():\n if hasattr(self, '_repository_cache'):\n notification._repository_cache = self._repository_cache\n notification.publish()\n\n def ordered_cards(self):\n \"\"\"Order card by project/column, using prefetched info if present\"\"\"\n\n need_fetch = True\n\n if hasattr(self, '_prefetched_objects_cache') and 'cards' in self._prefetched_objects_cache:\n # cards are already prefetched\n need_fetch = False\n for card in self._prefetched_objects_cache['cards']:\n try:\n card._column_cache._project_cache\n except AttributeError:\n # column or project not in cache\n need_fetch = True\n break\n\n if not need_fetch:\n return sorted(\n self._prefetched_objects_cache['cards'],\n key=lambda card: (card.column.project.number, card.column.position)\n )\n\n if not hasattr(self, '_prefetched_objects_cache'):\n self._prefetched_objects_cache = {}\n self._prefetched_objects_cache['cards'] = self.cards.select_related(\n 'column__project'\n ).order_by(\n 'column__project__number'\n )\n return list(self._prefetched_objects_cache['cards'])\n\n def user_can_add_pr_review(self, user):\n if not self.is_pull_request:\n return False\n if not user or user.is_anonymous:\n return False\n return self.user != user\n\n def get_notification_for_user(self, user):\n return self.github_notifications.filter(user=user).first()\n\n @cached_property\n def finalized_pr_states(self):\n if not self.is_pull_request or not self.pr_states:\n return []\n\n valid_head_shas = set(\n self.files.filter(\n pull_request_head_sha__isnull=False\n ).values_list(\n 'pull_request_head_sha', flat=True\n )\n )\n\n return [\n dict(\n state,\n index=index,\n is_actual=state['head_sha'] == self.head_sha,\n date=parse(state['date']),\n hash=self.compute_pr_state_hash(state, with_date=True),\n actual_or_number='actual' if state['head_sha'] == self.head_sha else 'state #%d' % index,\n no_data=state['head_sha'] not in valid_head_shas\n )\n for index, state in\n enumerate(\n sorted(\n [\n state\n for state\n in self.pr_states.get('states', [])\n ],\n key=itemgetter('date')\n ),\n start=1\n )\n ]\n\n def get_pr_state_for_hash(self, state_hash):\n if not state_hash:\n return self.finalized_pr_states[-1]\n return [state for state in self.finalized_pr_states if state['hash'] == 
state_hash][0]\n\n def ajax_pr_state_files_base_url(self):\n if not hasattr(self, '_ajax_pr_state_files_base_url'):\n kwargs = self.get_reverse_kwargs()\n kwargs['state_sha'] = '0' * 40\n self._ajax_pr_state_files_base_url = reverse_lazy('front:repository:issue.files', kwargs=kwargs)\n return self._ajax_pr_state_files_base_url\n\n def ajax_pr_state_files_compare_base_url(self):\n if not hasattr(self, '_ajax_pr_state_files_compare_base_url'):\n kwargs = self.get_reverse_kwargs()\n kwargs['state_sha'] = '0' * 40\n kwargs['other_state_sha'] = '1' * 40\n self._ajax_pr_state_files_compare_base_url = reverse_lazy('front:repository:issue.files.compare', kwargs=kwargs)\n return self._ajax_pr_state_files_compare_base_url\n\n\nclass GroupedItems(list):\n \"\"\"\n An object to regroup a list of entries of the same type:\n - in a list of activities: all entries between two entries of the activity\n list are grouped together per day (\"group_in_activity\")\n - per day (\"group_by_day\")\n Also provides an 'authors' method which returns a dict with each author and\n their number of entries\n \"\"\"\n model = None\n date_field = 'created_at'\n author_field = 'user'\n\n @classmethod\n def group_in_activity(cls, activity):\n final_activity = []\n current_group = None\n\n for entry in activity:\n\n if isinstance(entry, cls.model):\n # we have a THING\n\n # create a new group if first THING in a row\n if not current_group:\n current_group = cls()\n\n # add the THING to the current group\n current_group.append(entry)\n\n else:\n # not a THING\n\n # we close the current group, group its THINGs by day, and insert\n # the resulting sub groups in the activity\n if current_group:\n final_activity.extend(cls.group_by_day(current_group))\n # we'll want to start a fresh group\n current_group = None\n\n # we add the non-THING entry in the activity\n final_activity.append(entry)\n\n # still some THINGs with nothing after, add a group with them\n if current_group:\n final_activity.extend(cls.group_by_day(current_group))\n\n return final_activity\n\n @classmethod\n def group_by_day(cls, entries, include_without_dates=False):\n if not len(entries):\n return []\n\n groups = []\n waiting = []\n\n for entry in entries:\n entry_datetime = getattr(entry, cls.date_field)\n if not entry_datetime:\n if include_without_dates:\n waiting.append(entry)\n continue\n entry_date = entry_datetime.date()\n if not groups or entry_date != groups[-1].start_date:\n groups.append(cls())\n groups[-1].start_date = entry_date\n groups[-1].created_at = entry_datetime\n if waiting:\n groups[-1] += waiting\n waiting = []\n groups[-1].append(entry)\n\n return groups\n\n @classmethod\n def get_author(cls, entry):\n author = getattr(entry, cls.author_field)\n return {\n 'username': author.username,\n 'full_avatar_url': author.full_avatar_url,\n }\n\n def authors(self):\n result = OrderedDict()\n\n for entry in self:\n author = self.get_author(entry)\n name = author['username']\n if name not in result:\n result[name] = author\n result[name]['count'] = 0\n result[name]['count'] += 1\n\n return result\n\n\nclass GroupedPullRequestComments(GroupedItems):\n model = core_models.PullRequestComment\n date_field = 'created_at'\n author_field = 'user'\n is_pr_comments_group = True # for template\n\n\nclass GroupedCommitComments(GroupedItems):\n model = core_models.CommitComment\n date_field = 'created_at'\n author_field = 'user'\n is_commit_comments_group = True # for template\n\n\nclass GroupedCommits(GroupedItems):\n model = core_models.Commit\n date_field = 
'committed_at'\n author_field = 'author'\n is_commits_group = True # for template\n\n @classmethod\n def get_author(cls, entry):\n if entry.author_id:\n return super(GroupedCommits, cls).get_author(entry)\n else:\n return {\n 'username': entry.author_name,\n 'full_avatar_url': None,\n }\n\n\nclass _Commit(WithFiles, models.Model):\n class Meta:\n abstract = True\n\n date_field = 'committed_at'\n\n @property\n def splitted_message(self):\n LEN = 72\n ln_pos = self.message.find('\\n')\n if 0 <= ln_pos < LEN:\n result = [self.message[:ln_pos], self.message[ln_pos+1:]]\n while result[1] and result[1][0] == '\\n':\n result[1] = result[1][1:]\n return result\n return [self.message[:LEN], self.message[LEN:]]\n\n @property\n def all_entry_points(self):\n if not hasattr(self, '_all_entry_points'):\n self._all_entry_points = list(\n self.commit_comments_entry_points.annotate(\n nb_comments=models.Count('comments') # cannot exclude waiting_deleted for now\n ).filter(\n nb_comments__gt=0\n ).select_related(\n 'user', 'repository__owner'\n ).prefetch_related(\n 'comments__user'\n )\n )\n return self._all_entry_points\n\n @cached_property\n def comments_count_by_path(self):\n return Counter(\n self.commit_comments.filter(\n models.Q(entry_point__position__isnull=False)\n |\n models.Q(entry_point__path__isnull=True)\n ).select_related(\n 'entry_point'\n ).values_list(\n 'entry_point__path', flat=True\n )\n )\n\n @cached_property\n def count_global_comments(self):\n return self.comments_count_by_path.get(None, 0)\n\n @cached_property\n def real_author_name(self):\n return self.author.username if self.author_id else self.author_name\n\n @cached_property\n def real_committer_name(self):\n return self.committer.username if self.committer_id else self.committer_name\n\n @cached_property\n def committer_is_author(self):\n if self.author_id and self.committer_id:\n return self.author_id == self.committer_id\n if self.author_id:\n return (self.author.github_email or self.author_email) == self.committer_email\n if self.committer_id:\n return (self.committer.github_email or self.committer_email) == self.author_email\n return self.author_email == self.committer_email\n\n def get_reverse_kwargs_for_issue(self, issue):\n return dict(\n issue.get_reverse_kwargs(),\n commit_sha=self.commit.sha,\n )\n\n def get_absolute_url_for_issue(self, issue):\n from gim.front.repository.issues.views import CommitAjaxIssueView\n return reverse_lazy('front:repository:%s' % CommitAjaxIssueView.url_name,\n kwargs=self.get_reverse_kwargs_for_issue(issue))\n\n\nclass _WaitingSubscription(models.Model):\n class Meta:\n abstract = True\n\n def can_add_again(self):\n \"\"\"\n Return True if the user can add the repository again (it is allowed if\n the state is FAILED)\n \"\"\"\n return self.state == subscriptions_models.WAITING_SUBSCRIPTION_STATES.FAILED\n\n\nclass _IssueComment(FrontEditable):\n class Meta:\n abstract = True\n\n @property\n def html_content(self):\n return html_content(self)\n\n def get_reverse_kwargs(self):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'issue_number': self.issue.number,\n 'comment_pk': self.pk,\n }\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_absolute_url(self):\n from gim.front.repository.issues.views import IssueCommentView\n return self.get_view_url(IssueCommentView.url_name)\n\n def 
get_edit_url(self):\n from gim.front.repository.issues.views import IssueCommentEditView\n return self.get_view_url(IssueCommentEditView.url_name)\n\n def get_delete_url(self):\n from gim.front.repository.issues.views import IssueCommentDeleteView\n return self.get_view_url(IssueCommentDeleteView.url_name)\n\n\nclass _PullRequestComment(FrontEditable):\n class Meta:\n abstract = True\n\n @property\n def html_content(self):\n return html_content(self)\n\n def get_reverse_kwargs(self):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'issue_number': self.issue.number,\n 'comment_pk': self.pk,\n }\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_absolute_url(self):\n from gim.front.repository.issues.views import PullRequestCommentView\n return self.get_view_url(PullRequestCommentView.url_name)\n\n def get_edit_url(self):\n from gim.front.repository.issues.views import PullRequestCommentEditView\n return self.get_view_url(PullRequestCommentEditView.url_name)\n\n def get_delete_url(self):\n from gim.front.repository.issues.views import PullRequestCommentDeleteView\n return self.get_view_url(PullRequestCommentDeleteView.url_name)\n\n\nclass _CommitComment(FrontEditable):\n class Meta:\n abstract = True\n\n @property\n def html_content(self):\n return html_content(self)\n\n def get_reverse_kwargs_for_issue(self, issue):\n return dict(\n issue.get_reverse_kwargs(),\n commit_sha=self.commit.sha,\n comment_pk=self.pk,\n )\n\n def get_absolute_url_for_issue(self, issue):\n from gim.front.repository.issues.views import CommitCommentView\n return reverse_lazy('front:repository:%s' % CommitCommentView.url_name,\n kwargs=self.get_reverse_kwargs_for_issue(issue))\n\n\nclass _GithubNotification(models.Model):\n class Meta:\n abstract = True\n\n def get_edit_url(self):\n return reverse_lazy('front:github-notifications:edit', kwargs={'notif_id': self.pk})\n\n @classmethod\n def get_last_url(cls):\n return reverse_lazy('front:github-notifications:last')\n\n\nclass _PullRequestReview(Hashable, FrontEditable):\n\n class Meta:\n abstract = True\n\n is_pull_request_review = True\n\n @property\n def hash_values(self):\n return self.author_id, (self.submitted_at or ''), self.state\n\n def get_related_issues(self):\n \"\"\"\n Return a list of all issues related to this review(ie only one!)\n \"\"\"\n return core_models.Issue.objects.filter(pk=self.issue_id)\n\n @property\n def created_at(self):\n return self.submitted_at\n\n @property\n def user_id(self):\n return self.author_id\n\n @property\n def user(self):\n return self.author\n\n @property\n def html_content(self):\n return html_content(self)\n\n def get_reverse_kwargs(self):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'issue_number': self.issue.number,\n 'review_pk': self.pk,\n }\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_absolute_url(self):\n from gim.front.repository.issues.views import PullRequestReviewView\n return self.get_view_url(PullRequestReviewView.url_name)\n\n def get_edit_url(self):\n from gim.front.repository.issues.views import PullRequestReviewEditView\n return self.get_view_url(PullRequestReviewEditView.url_name)\n\n 
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n\n is_new = not bool(self.pk)\n\n if not update_fields or 'body' in update_fields:\n self.body_html = html_content(self) if self.body else ''\n if update_fields:\n update_fields.append('body_html')\n\n if is_new and self.front_uuid:\n self.issue.front_uuid = self.front_uuid\n\n self.old_save(force_insert, force_update, using, update_fields)\n\n\nclass _Project(Hashable, FrontEditable):\n body_html = models.TextField(blank=True, null=True)\n\n class Meta:\n abstract = True\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n\n if not update_fields or 'body' in update_fields:\n self.body_html = html_content(self) if self.body else ''\n if update_fields:\n update_fields.append('body_html')\n\n self.old_save(force_insert, force_update, using, update_fields)\n\n @property\n def hash_values(self):\n \"\"\"\n Hash for this object representing its state at the current time, used to\n know if we have to reset an issue's cache\n \"\"\"\n return self.name\n\n def get_related_issues(self):\n \"\"\"\n Return a list of all issues related to this project\n \"\"\"\n return core_models.Issue.objects.filter(cards__column__project=self)\n\n def get_reverse_kwargs(self):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'project_number': self.number,\n }\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_absolute_url(self):\n from gim.front.repository.board.views import BoardView\n return reverse_lazy('front:repository:%s' % BoardView.url_name, kwargs={\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'board_mode': 'project',\n 'board_key': self.number,\n }) + '?sort=position&direction=asc'\n\n def get_summary_url(self):\n if self.number:\n from gim.front.repository.board.views import ProjectSummaryView\n return self.get_view_url(ProjectSummaryView.url_name)\n else:\n from gim.front.repository.board.views import NewProjectSummaryView\n kwargs = self.get_reverse_kwargs()\n del kwargs['project_number']\n kwargs['project_id'] = self.pk\n return reverse_lazy('front:repository:%s' % NewProjectSummaryView.url_name, kwargs=kwargs)\n\n def get_edit_url(self):\n from gim.front.repository.board.views import ProjectEditView\n return self.get_view_url(ProjectEditView.url_name)\n\n def get_delete_url(self):\n from gim.front.repository.board.views import ProjectDeleteView\n return self.get_view_url(ProjectDeleteView.url_name)\n\n def get_create_column_url(self):\n from gim.front.repository.board.views import ColumnCreateView\n return self.get_view_url(ColumnCreateView.url_name)\n\n\nclass _Column(Hashable, FrontEditable):\n class Meta:\n abstract = True\n\n @property\n def hash_values(self):\n \"\"\"\n Hash for this object representing its state at the current time, used to\n know if we have to reset an issue's cache\n \"\"\"\n return self.name, (self.position or '')\n\n def get_related_issues(self):\n \"\"\"\n Return a list of all issues related to this column\n \"\"\"\n return core_models.Issue.objects.filter(cards__column=self)\n\n def get_reverse_kwargs(self):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.project.repository.owner.username,\n 'repository_name': 
self.project.repository.name,\n 'project_number': self.project.number,\n 'column_id': self.pk,\n }\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_absolute_url(self):\n from gim.front.repository.board.views import BoardProjectColumnView\n return reverse_lazy('front:repository:%s' % BoardProjectColumnView.url_name, kwargs={\n 'owner_username': self.project.repository.owner.username,\n 'repository_name': self.project.repository.name,\n 'board_mode': 'project',\n 'board_key': self.project.number,\n 'column_key': self.pk,\n }) + '?sort=position&direction=asc'\n\n def get_edit_url(self):\n from gim.front.repository.board.views import ColumnEditView\n return self.get_view_url(ColumnEditView.url_name)\n\n def get_info_url(self):\n from gim.front.repository.board.views import ColumnInfoView\n return self.get_view_url(ColumnInfoView.url_name)\n\n def get_delete_url(self):\n from gim.front.repository.board.views import ColumnDeleteView\n return self.get_view_url(ColumnDeleteView.url_name)\n\n def get_can_move_url(self):\n from gim.front.repository.board.views import ColumnCanMoveView\n return self.get_view_url(ColumnCanMoveView.url_name)\n\n def get_move_url(self):\n from gim.front.repository.board.views import ColumnMoveView\n return self.get_view_url(ColumnMoveView.url_name)\n\n def get_create_note_url(self):\n from gim.front.repository.board.views import CardNoteCreateView\n return self.get_view_url(CardNoteCreateView.url_name)\n\n\nclass _Card(Hashable, FrontEditable):\n note_html = models.TextField(blank=True, null=True)\n\n RENDERER_IGNORE_FIELDS = set() # an empty set, not an empty dict\n\n class Meta:\n abstract = True\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n\n if self.type == self.CARDTYPE.NOTE:\n if not update_fields or 'note' in update_fields:\n self.note_html = html_content(self, 'note', force=True) if self.note else ''\n if update_fields:\n update_fields.append('note_html')\n\n self.old_save(force_insert, force_update, using, update_fields)\n\n @property\n def hash_values(self):\n \"\"\"\n Hash for this object representing its state at the current time, used to\n know if we have to reset an issue's cache\n \"\"\"\n return (self.column_id or ''), str(self.position or '')\n\n def get_related_issues(self):\n \"\"\"\n Return a list of all issues related to this card (ie only one!)\n \"\"\"\n return core_models.Issue.objects.filter(cards=self)\n\n def get_reverse_kwargs(self):\n \"\"\"\n Return the kwargs to use for \"reverse\"\n \"\"\"\n return {\n 'owner_username': self.repository.owner.username,\n 'repository_name': self.repository.name,\n 'project_number': self.column.project.number,\n 'card_pk': self.pk,\n }\n\n def get_view_url(self, url_name):\n return reverse_lazy('front:repository:%s' % url_name, kwargs=self.get_reverse_kwargs())\n\n def get_absolute_url(self):\n from gim.front.repository.board.views import CardNoteView\n return self.get_view_url(CardNoteView.url_name)\n\n def get_edit_url(self):\n from gim.front.repository.board.views import CardNoteEditView\n return self.get_view_url(CardNoteEditView.url_name)\n\n def get_delete_url(self):\n from gim.front.repository.board.views import CardNoteDeleteView\n return self.get_view_url(CardNoteDeleteView.url_name)\n\n\nclass H(lmodel.RedisModel):\n \"\"\"Stores hashes of some models\n\n This model uses some tricks to optimize memory:\n - save non-hex version of the hash instead of the hex version\n - use a single letter to 
represent each django model\n - single letter for the current class (H instead of Hash)\n - use the \"Using hashes to abstract a very memory efficient plain key-value store on top of Redis\"\n described on https://redis.io/topics/memory-optimization\n\n\n \"\"\"\n database = get_main_limpyd_database()\n collection_manager = ExtendedCollectionManager\n\n models = {\n 'GithubUser': 'a',\n 'Repository': 'b',\n 'LabelType': 'c',\n 'Label': 'd',\n 'Milestone': 'e',\n 'Issue': 'f',\n 'PullRequestReview': 'g',\n 'Project': 'h',\n 'Column': 'i',\n 'Card': 'j',\n }\n\n pk = lfields.PKField()\n h = lfields.HashField()\n\n @classmethod\n def get_pk_and_hash_key(cls, model_name, pk):\n pk = str(pk)\n part_for_pk, hash_key = pk[:-2], pk[-2:]\n return '%s:%s' % (\n cls.models[model_name],\n part_for_pk\n ), hash_key\n\n @classmethod\n def get_or_connect_for_obj(cls, obj):\n pk, hash_key = cls.get_pk_and_hash_key(obj.model_name, obj.pk)\n hash_obj, created = cls.get_or_connect(pk=pk)\n if not created:\n created = not hash_obj.h.hexists(hash_key)\n return hash_obj, created\n\n @staticmethod\n def extract_hash_key_from_pk(pk):\n return str(pk)[-2:]\n\n def set_for_obj(self, obj, hash, is_hex=True):\n hash_key = self.extract_hash_key_from_pk(obj.pk)\n if is_hex:\n hash = hash.decode('hex')\n self.h.hset(hash_key, hash)\n\n def get_for_obj(self, obj):\n hash_key = self.extract_hash_key_from_pk(obj.pk)\n result = self.h.hget(hash_key)\n if result:\n try:\n result = result.encode('hex')\n except Exception:\n return None\n return result\n","sub_path":"gim/front/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":65103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
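The `H` model above implements the bucketing trick from the linked Redis page: rather than one top-level key per object, up to a hundred objects share a single Redis hash (key = model letter plus all but the last two digits of the pk, field = the last two digits), so the hashes stay small enough for Redis's compact ziplist/listpack encoding. A standalone sketch of the same key scheme with plain redis-py (an assumption for illustration; the project itself goes through limpyd):

import redis

MODEL_LETTERS = {'Issue': 'f'}  # same single-letter mapping as H.models

def bucket(model_name, pk):
    # ('Issue', 123456) -> key 'f:1234', field '56'
    pk = str(pk)
    return '%s:%s' % (MODEL_LETTERS[model_name], pk[:-2]), pk[-2:]

r = redis.Redis()
key, field = bucket('Issue', 123456)
r.hset(key, field, 'd033e22ae348aeb5660fc2140aec35850c4da997')  # store a digest
print(r.hget(key, field))

On top of that, `set_for_obj`/`get_for_obj` store the 20 raw digest bytes rather than the 40 hex characters (the `decode('hex')`/`encode('hex')` round-trip), halving the stored payload.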
+{"seq_id":"44448255","text":"from datetime import datetime\n\nfrom django.test import TestCase\nimport mock\n\nfrom FoodBank.foodbank.factories import FarmerFactory, ProduceFactory\n\n\nclass StaticViewTest(TestCase):\n\n def test_home_uses_template(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response,'home.html')\n\n def test_about_uses_template(self):\n response = self.client.get('/about_us/')\n self.assertTemplateUsed(response,'about.html')\n\n def test_contact_uses_template(self):\n response = self.client.get('/contact/')\n self.assertTemplateUsed(response,'contact.html')\n\nclass FarmerListViewTest(TestCase):\n\n def test_farmer_list_uses_template(self):\n response = self.client.get('/farmers/')\n self.assertTemplateUsed(response,'farmer_list.html')\n\n def test_farmer_list_passes_farmer_list(self):\n farmer1 = FarmerFactory()\n farmer2 = FarmerFactory()\n response = self.client.get('/farmers/')\n self.assertTrue( farmer1 , response.context['farmers'] )\n self.assertTrue( farmer2 , response.context['farmers'] )\n\nclass FarmerDetailViewTest(TestCase):\n\n def setUp(self):\n self.farmer = FarmerFactory()\n self.farmer.save()\n\n def test_farmer_detail_uses_template(self):\n response = self.client.get('/farmer/{id}/'.format(id=self.farmer.id))\n self.assertTemplateUsed(response,'farmer_detail.html')\n\n def test_farmer_detail_passes_farmer(self):\n response = self.client.get('/farmer/{id}/'.format(id=self.farmer.id))\n self.assertEqual( self.farmer, response.context['farmer'] )\n\n\n\nclass FarmerModel(TestCase):\n\n def setUp(self):\n self.farmer = FarmerFactory.create()\n self.produce1 = ProduceFactory.create(farmer=self.farmer,\n collection_start=datetime(2013,3,1), collection_end=datetime(2013,3,2))\n self.produce2 = ProduceFactory.create(farmer=self.farmer,\n collection_start=datetime(2013,3,4), collection_end=datetime(2013,3,6))\n self.produce3 = ProduceFactory.create(farmer=self.farmer,\n collection_start=datetime(2013,3,20), collection_end=datetime(2013,4,1))\n self.produce4 = ProduceFactory.create()\n\n\n @mock.patch('utils.datetime.now')\n def test_current_produce(self, mock_now):\n mock_now.return_value = datetime(2013,3,4,12,30)\n self.assertTrue(not self.produce1 in self.farmer.current_produce)\n self.assertTrue(self.produce2 in self.farmer.current_produce)\n self.assertTrue(not self.produce3 in self.farmer.current_produce)\n self.assertTrue(not self.produce4 in self.farmer.current_produce)\n\n @mock.patch('utils.datetime.now')\n def test_available_produce(self, mock_now):\n mock_now.return_value = datetime(2013,3,4,12,30)\n self.assertTrue(not self.produce1 in self.farmer.available_produce)\n self.assertTrue(self.produce2 in self.farmer.available_produce)\n self.assertTrue(self.produce3 in self.farmer.available_produce)\n self.assertTrue(not self.produce4 in self.farmer.available_produce)\n\n @mock.patch('utils.datetime.now')\n def test_future_produce(self, mock_now):\n mock_now.return_value = datetime(2013,3,4,12,30)\n self.assertTrue(not self.produce1 in self.farmer.future_produce)\n self.assertTrue(not self.produce2 in self.farmer.future_produce)\n self.assertTrue(self.produce3 in self.farmer.future_produce)\n self.assertTrue(not self.produce4 in self.farmer.future_produce)","sub_path":"FoodBank/foodbank/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"73781066","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\"\"\"\n@auther: ryanlee\n@time: 2018/12/7 15:28\n\"\"\"\nimport logging, unittest, time\nfrom BSTestRunner import BSTestRunner\nfrom common_view import common_func\n\n#指定测试用例和测试报告的路径\ntest_dir= '../test_case'\nreport_dir= '../reports'\n\n#加载测试用例\ndiscover= unittest.defaultTestLoader.discover(test_dir, pattern= 'test_login.py')\n\n#定义报告的文件格式\nnow= time.strftime('%Y-%m-%d-%H_%M_%S')\nreport_name= report_dir+ '/'+ now+ ' test_report.html'\n\n#运行用例并生成测试报告\nwith open(report_name, 'wb') as f:\n runner= BSTestRunner(stream= f, title= 'Kyb Test Report', description= 'Kyb Android App Test Report')\n logging.info('start run testcase')\n runner.run(discover)\n\nlogging.info('find latest report...')\n# 查找最新的测试报告\nlatest_report = common_func.latest_report(report_dir)\n# 邮件发送报告\nlogging.info('send email report...')\ncommon_func.send_mail(latest_report)\nlogging.info('test end!')","sub_path":"mymenoy_project/test_run/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"479062418","text":"# -*- coding: utf-8 -*-\nfrom Tkinter import *\nimport tkMessageBox as MsgBox\n#from BD_InterfaceBaseDonnees import *\n#from BaseDonnees import *\n \n#import os\n#os.environ['PATH'] =\"C:/Oracle/Client/bin\" + os.pathsep +os.environ['PATH'] \n#import cx_Oracle\nclass VueInitialisation(Frame):\n def __init__(self,master,parent,**cnf):\n KeysList=cnf.keys()\n self.parent = parent\n self.master=master\n self.width=400\n self.height=200\n if ('width' in KeysList):\n self.width=cnf['width']\n if ('width' in KeysList):\n self.height=cnf['height']\n Frame.__init__(self,master,cnf) \n self.master.update()\n self.x=self.master.winfo_width()/2-(self.width/2)\n self.y=self.master.winfo_height()/2-(self.height/2) \n self.place(x=self.x,y=self.y)\n self.l1=Listbox(self)\n self.l1.grid(row=0, column=0)\n self.l1.insert(END, \"Emp\")\n self.l1.insert(END, \"Dept\")\n self.l1.insert(END, \"Client\")\n self.l1.insert(END, \"inventaire\")\n self.l1.insert(END, \"Produit\")\n self.l1.insert(END, \"Commande\")\n self.l1.insert(END, \"Item\")\n self.l1.insert(END, \"Region\")\n self.l1.insert(END, \"Poste\")\n self.l1.insert(END, \"Depot\")\n self.l2=Listbox(self)\n \n self.l2.grid(row=0, column=2)\n self.center=Frame(self)\n self.B=Button(self.center, text=\">\", command=self.insertElement)\n self.B1=Button(self.center, text=\"<\", command=self.deleteElement)\n self.bouton= Button(self,text=\"Creer\", command=self.CreerTables)\n self.bouton.grid(row=1, column=2)\n self.B.pack()\n self.B1.pack()\n self.center.grid(row=0, column=1)\n #self.center.pack()\n \n #self.BaseDonnees= CommunicationBaseDonnees()\n #self.BaseDonnees.DemarrerConnection(\"b52equip1\", \"var2\", \"titan\")\n \n def CreerTables(self):\n listeNomsTables={'Emp':tableEmp,\n 'Dept':tableDept,\n 'Client':tableClient,\n 'inventaire':tableInventaire,\n 'Produit':tableProduit,\n 'Commande':tableCommande,\n 'Item':tableItem,\n 'Region':tableRegion, \n 'Poste':tablePoste,\n 'Depot':tableDepot}\n\n listeTables=[] \n for i in range(self.l2.size()): \n Reponse = self.parent.BaseDonnees.CreerTable(listeNomsTables[self.l2.get(i)])\n if (Reponse[0]==0):\n MsgBox.showerror('PERP | Cr�ation des tables','Une erreur c\\'est produite'+\\\n Reponse[1])\n def insertElement(self):\n selection=self.l1.curselection()\n self.l2.insert(END, self.l1.get(selection[0]))\n self.l1.delete(selection)\n pass\n \n def deleteElement(self):\n delete=self.l2.curselection()\n self.l1.insert(END, self.l2.get(delete[0]))\n self.l2.delete(delete)\n pass\n \n def tableEmp (self):\n self.listeEmp=[]\n self.listeEmp.append(xChamp(nom=\"Id\", type=\"NUMBER\", taille=\"7\"))\n self.listeEmp.append(xChamp(nom=\"last_name\", type=\"VARCHAR2\", taille=\"25\"))\n self.listeEmp.append(xChamp(nom=\"first_name\",type=\"VARCHAR2\", taille=\"25\"))\n self.listeEmp.append(xChamp(nom=\"start_date\", type=\"Date\"))\n self.listeEmp.append(xChamp(nom=\"manager_id\", type=\"VARCHAR2\", taille=\"25\"))\n self.listeEmp.append(xChamp(nom=\"title\", type=\"VARCHAR2\", taille=\"25\"))\n self.listeEmp.append(xChamp(nom=\"Dept_id\", type=\"NUMBER\", taille=\"7\"))\n self.listeEmp.append(xChamp(nom=\"Salary\", type=\"NUMBER\", taille=\"11\", precision=\"2\"))\n \n self.matableEmp=xTable(nom=\"table_Emp\", listechamps=self.listeEmp)\n return self.matableEmp\n \n def tableDept (self):\n self.listeDept=[]\n self.listeDept.append(xChamp(nom=\"Id\", type=\"entier\", taille=\"7\"))\n self.listeDept.append(xChamp(nom=\"name\", type=\"texte\", taille=\"25\"))\n 
        self.listeDept.append(xChamp(nom=\"region_id\", type=\"entier\", taille=\"7\" ))\n        \n        self.matableDept=xTable(nom=\"table_dept\", listechamps=self.listeDept)\n        return self.matableDept\n    \n    def tableClient (self):\n        self.listeClient=[]\n        self.listeClient.append(xChamp(nom=\"Id\",type=\"entier\", taille=\"7\"))\n        self.listeClient.append(xChamp(nom=\"last_name\", type=\"texte\", taille=\"25\"))\n        self.listeClient.append(xChamp(nom=\"first_name\",type=\"texte\", taille=\"25\" ))\n        self.listeClient.append(xChamp(nom=\"address\", type=\"texte\", taille=\"400\"))\n        self.listeClient.append(xChamp(nom=\"city\", type=\"texte\", taille=\"30\"))\n        self.listeClient.append(xChamp(nom=\"state\", type=\"texte\", taille=\"25\"))\n        self.listeClient.append(xChamp(nom=\"country\", type=\"texte\", taille=\"25\")) \n        self.listeClient.append(xChamp(nom=\"zip_code\", type=\"texte\", taille=\"20\"))\n        self.listeClient.append(xChamp(nom=\"sales_rep_id\", type=\"entier\", taille=\"7\"))\n        self.listeClient.append(xChamp(nom=\"credit_rating\", type=\"texte\", taille=\"9\"))\n        self.listeClient.append(xChamp(nom=\"region_id\", type=\"entier\", taille=\"7\"))\n        \n        self.matableClient=xTable(nom=\"table_Client\", listechamps=self.listeClient)\n        return self.matableClient\n    \n    \n    def tableRegion (self):\n        self.listeRegion=[]\n        self.listeRegion.append(xChamp(nom=\"Id\", type=\"entier\", taille=\"7\"))\n        self.listeRegion.append(xChamp(nom=\"name\", type=\"texte\", taille=\"25\"))\n        \n        self.matableRegion=xTable(nom=\"table_Region\", listechamps=self.listeRegion)\n        return self.matableRegion\n    \n    \n    def tablePoste (self):\n        self.listePoste=[]\n        self.listePoste.append(xChamp(nom=\"title\", type=\"texte\", taille=\"25\"))\n        \n        self.matablePoste=xTable(nom=\"table_Poste\", listechamps=self.listePoste)\n        return self.matablePoste\n    \n    \n    def tableCommande (self):\n        self.listeCde=[]\n        self.listeCde.append(xChamp(nom=\"Id\", type=\"entier\", taille=\"7\"))\n        self.listeCde.append(xChamp(nom=\"customer_id\", type=\"entier\", taille=\"7\"))\n        self.listeCde.append((xChamp(nom=\"date_ordered\", type=\"date\")))\n        self.listeCde.append((xChamp(nom=\"date_shipped\", type=\"entier\", taille=\"7\")))\n        self.listeCde.append(xChamp(nom=\"sales_rep_id\", type=\"entier\", taille=\"7\"))\n        self.listeCde.append(xChamp(nom=\"total\", type=\"entier\", taille=\"11\", precision=\"2\"))\n        self.listeCde.append(xChamp(nom=\"payment_type\", type=\"texte\", taille=\"9\"))\n        self.listeCde.append(xChamp(nom=\"order_filled\", type=\"texte\", taille=\"1\"))\n        \n        self.matableCommande=xTable(nom=\"table_Commande\", listechamps=self.listeCde)\n        return self.matableCommande\n    \n    def tableDepot (self):\n        self.listeDepot=[]\n        self.listeDepot.append(xChamp(nom=\"Id\", type=\"entier\", taille=\"7\"))\n        self.listeDepot.append(xChamp(nom=\"address\", type=\"texte\", taille=\"400\"))\n        self.listeDepot.append(xChamp(nom=\"city\", type=\"texte\", taille=\"30\"))\n        self.listeDepot.append(xChamp(nom=\"state\", type=\"texte\", taille=\"25\"))\n        self.listeDepot.append(xChamp(nom=\"country\", type=\"texte\", taille=\"25\")) \n        self.listeDepot.append(xChamp(nom=\"zip_code\", type=\"texte\", taille=\"20\"))\n        self.listeDepot.append(xChamp(nom=\"phone\", type=\"texte\", taille=\"25\"))\n        self.listeDepot.append(xChamp(nom=\"manager_id\", type=\"entier\", taille=\"7\"))\n        \n        self.matableDepot=xTable(nom=\"table_Depot\", listechamps=self.listeDepot)\n        return self.matableDepot\n    \n    def tableInventaire(self): \n        self.listeInventaire=[]\n
        self.listeInventaire.append(xChamp(nom=\"product_id\", type=\"entier\", taille=\"7\"))\n        self.listeInventaire.append(xChamp(nom=\"warehouse_id\", type=\"entier\", taille=\"7\"))\n        self.listeInventaire.append(xChamp(nom=\"amount_in_stock\", type=\"entier\", taille=\"9\"))\n        self.listeInventaire.append(xChamp(nom=\"reorder_point\", type=\"entier\", taille=\"9\"))\n        self.listeInventaire.append(xChamp(nom=\"max_in_stock\", type=\"entier\", taille=\"9\")) \n        \n        self.matableInventaire=xTable(nom=\"table_Inventaire\", listechamps=self.listeInventaire)\n        return self.matableInventaire\n    \n    def tableItem(self): \n        self.listeItem=[]\n        self.listeItem.append(xChamp(nom=\"ord_id\", type=\"entier\", taille=\"7\"))\n        self.listeItem.append(xChamp(nom=\"item_id\", type=\"entier\", taille=\"7\"))\n        self.listeItem.append(xChamp(nom=\"product_id\", type=\"entier\", taille=\"7\")) \n        self.listeItem.append(xChamp(nom=\"price\", type=\"entier\", taille=\"11\", precision=\"2\"))\n        self.listeItem.append(xChamp(nom=\"quantity\", type=\"entier\", taille=\"9\"))\n        self.listeItem.append(xChamp(nom=\"quantity_shipped\", type=\"entier\", taille=\"9\"))\n        \n        self.matableItem=xTable(nom=\"table_Item\", listechamps=self.listeItem)\n        return self.matableItem\n    \n    \n    def tableProduit(self): \n        self.listeProduit=[]\n        self.listeProduit.append(xChamp(nom=\"product_id\", type=\"entier\", taille=\"7\"))\n        self.listeProduit.append(xChamp(nom=\"name\", type=\"texte\", taille=\"50\")) \n        self.listeProduit.append(xChamp(nom=\"price\", type=\"entier\", taille=\"11\", precision=\"2\"))\n        self.listeProduit.append(xChamp(nom=\"quantity\", type=\"entier\", taille=\"9\"))\n        \n        self.matableProduit=xTable(nom=\"table_Produit\", listechamps=self.listeProduit)\n        return self.matableProduit\n    \n    \n    ","sub_path":"Recyclage/Vue_Initialisation.py","file_name":"Vue_Initialisation.py","file_ext":"py","file_size_in_byte":9711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"464535397","text":"# Description of the task and judge system:\n# https://judge.softuni.bg/Contests/Practice/Index/425?fbclid=IwAR1WJ83LNxAX6nfOOyKsl7yA1RA7VcRj1Qvyc32X2X_LJh8-DDyLwHl3UzA#5\n\n\ndef growing(grapes, n):\n for times in range(n):\n grapes_copy = grapes.copy()\n for i in range(1, len(grapes)-1):\n if grapes[i-1] < grapes[i] > grapes[i+1]: # Classified as greater grape\n grapes[i] += 1\n if grapes[i-1] > 0:\n grapes[i-1] -= 1\n grapes[i] += 1\n if grapes[i+1] > 0:\n grapes[i+1] -= 1\n grapes[i] += 1\n\n # Incrementing all grapes that has not been changed\n # ______________________________________________________\n for e in range(len(grapes)):\n if grapes_copy[e] == grapes[e]: # if the value has not been changed\n if grapes[e] > 0:\n grapes[e] += 1\n # ______________________________________________________\n\n\ndef solve(grapes, n):\n grapes_copy = grapes.copy()\n while len(grapes_copy) >= n:\n growing(grapes, n)\n grapes_copy = [e for e in grapes if e > n] # filtering the list by removing the numbers less or equal to n\n for e in range(len(grapes)):\n if grapes[e] < n:\n grapes[e] = 0\n return ' '.join(map(str, grapes_copy))\n\n\ndef main():\n grapes = [int(e) for e in input().split()]\n n = int(input())\n print(solve(grapes, n))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python/Python-Fundamentals/3.2Lists_Advanced/0.WineCraft.py","file_name":"0.WineCraft.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"42728947","text":"import course\nimport assignment\nimport datetime\nimport re\nimport string\n\n\nfrom robobrowser import RoboBrowser\nfrom bs4 import BeautifulSoup\n\n# Global variables\nstudentName = \"\"\ncourses = list(map(course.Course,[]))\n\n# Body\ndef main():\n br = RoboBrowser(parser='html.parser')\n\n mainpageGrades = extractMainPage(br)\n\n\ndef extractMainPage(robo):\n br = robo\n br.open(\"https://parents.chclc.org/genesis/sis/view?gohome=true\")\n form = br.get_form()\n form[\"j_username\"] = \"3010476@chclc.org\"\n form[\"j_password\"] = \"y7cvmz2d\"\n br.submit_form(form)\n\n #Converts the HTML of the grade page into a string\n src = str(br.parsed())\n\n #Removes initial Javascript\n src = src[src.find('')+len(''): len(src)]\n\n #Removes all HTML tags\n soup = BeautifulSoup(src,\"lxml\")\n src = ''.join(soup.findAll(text=True))\n \n #Removes all tabs and newlines\n src = \" \".join(src.split())\n studentName = src[src.index(\"Select Student:\")+16:src.index(\"Weekly Summary\")-1] \n\n #Cuts the string into the important parts\n notDone = True\n i = src.find('Fri')+3 #Consistent and close reference point\n while(notDone):\n i += 1\n if(ord(src[i])>=57):\n notDone = False\n src = src[i+2:src.rfind('%')+1]\n \n #Parses the text\n courseInfo = src.split('%')\n courseInfo.pop()\n for i in range(len(courseInfo)):\n courseInfo[i] = courseInfo[i].split(\"Email:\")\n if(i != 0):\n courseInfo[i][0] = courseInfo[i][0][11:len(courseInfo[i][0])]\n \n \n # Separate info\n cInfo = courseInfo[i][0]\n cName = cInfo[0:cInfo[0:cInfo.index(\",\")].rfind(\" \")]\n cTeacher = cInfo[cInfo[0:cInfo.index(\",\")].rfind(\" \")+1:len(cInfo)-1]\n\n # Create course\n c = course.Course(cName,cTeacher,\"P\")\n # Add assignments from List Assignments tab (click on link) \n # Example: c.addAssignment(\"a1\",10,10,assignment.Category.MajorAssessment,datetime.datetime.today().date)\n courses.append(c)\n \n # Test by printing info\n print(\"Student: \" + studentName)\n for i in range(len(courses)):\n print(str(courses[i].currentMPGrade) + \" in \" + courses[i].courseName \n + \" with \" + courses[i].teacherName + \" during \" + courses[i].period)\n\n return courseInfo\n \n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":".vscode/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"81436203","text":"import random as rand\n\n# Sector Class\n\nclass Sector:\n\tdef __init__(self, x, y, ruler=\"Indepented\", speciality=None, capital=False):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.ruler = ruler\n\t\tself.speciality = speciality\n\t\tself.population = rand.randint(104, 24650)\n\t\tif ruler != None and capital == False:\n\t\t\tself.supporters = rand.randint(1, 108)\n\t\t\tself.rebels = 0\n\t\t\tself.neutrals = self.population - self.supporters\n\t\telif ruler != None and capital == True:\n\t\t\tself.neutrals = rand.randint(1, 1050)\n\t\t\tself.supporters = self.population - self.neutrals\n\t\t\tself.rebels = 0\n\t\t\n\t\t\n","sub_path":"sector.py","file_name":"sector.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"564467019","text":"# Given a month (int between 1 and 12), print numbers of days in that month \n\n# 30 - 9,4,6,11\n# 31 all the rest \n# 28 - 2\n\ndef days(num):\n if num in [9,4,6,11]:\n return 30 \n elif num == 2 :\n return 28 \n else: \n return 31\n \n\nprint(days(1)) # 31 \nprint(days(9)) # 30\nprint(days(2)) # 28","sub_path":"CTI/replit_python_practice/if_else/days_in_month.py","file_name":"days_in_month.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"186983079","text":"# openStat - session.py\nfrom aux import *\n\n\nclass _variable:\n tag, e = None, 0.0\n def __init__(self, tag, data, isData):\n self.tag = tag\n if isData: \n self.e = sum(data) / len(data)\n\nclass Session:\n\n data, variables = [], []\n\n def load_data(self, file_path, header_row):\n with open(file_path, \"r\") as data_file:\n if header_row: \n data_file.readline()\n for line in data_file.readlines():\n # NEED REAL PARSING HERE\n data.append([ conv(line[i]) for i in range(len(line)) ])\n continue\n data_file.close()\n \n def set_variables(self, variables):\n self.variables = [ \\\n _variable(tag, [x[i-1] for x in self.data], data != []) for i, tag in variables.items() \\\n ]\n \n\n def __init__(self, file_path=None, header_row=True, variables=None):\n if file_path != None: self.load_data(file_path, header_row)\n if variables != None: self.set_variables(variables)\n # find the means, SDs, expectations, \n \n \n \n\n \n ","sub_path":"openStat/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"91752964","text":"#https://pl.spoj.com/problems/JSZYCER/\r\n\r\nimport string\r\nchars=string.ascii_uppercase\r\nchars = list(chars)\r\nhaslo = str(input(\"Wprowadz zdanie do zaszyfrowania: \"))\r\nzaszyfrowane = []\r\nhaslo = list(haslo.upper())\r\nfor i in range(0, len(haslo)):\r\n if haslo[i] != \" \":\r\n if chars.index(haslo[i]) > 22:\r\n haslo[i] = chars[chars.index(haslo[i])-23]\r\n zaszyfrowane.append(haslo[i])\r\n else:\r\n haslo[i] = chars[chars.index(haslo[i]) + 3]\r\n zaszyfrowane.append(haslo[i])\r\n else:\r\n haslo[i] = \" \"\r\n zaszyfrowane.append(haslo[i])\r\nzaszyfrowane = \"\".join(zaszyfrowane)\r\nprint(\"Zaszyfrowana wiadomość: \", zaszyfrowane)\r\n","sub_path":"Szyfr_Cezara.py","file_name":"Szyfr_Cezara.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"556247192","text":"#! -*- coding: utf-8 -*-\nimport sys\nimport os\nfrom JapaneseTokenizer import MecabWrapper\nfrom JapaneseTokenizer import TokenizedSenetence\nfrom JapaneseTokenizer import FilteredObject\nfrom JapaneseTokenizer.datamodels import TokenizedResult\n__author__ = 'kensuke-mi'\n\n\ndef basic_example():\n # ========================================================\n # TOKENIZE\n # ========================================================\n\n # input is `unicode` type(in python2x)\n # In python3x, you don't mind it\n sentence = u'テヘラン(ペルシア語: تهران ; Tehrān Tehran.ogg 発音[ヘルプ/ファイル]/teɦˈrɔːn/、英語:Tehran)は、西アジア、イランの首都でありかつテヘラン州の州都。人口12,223,598人。都市圏人口は13,413,348人に達する。'\n\n # make MecabWrapper object\n # osType is generic or centos. it's because Mecab has different system command in CentOs.\n # If you're using this in CentsOs, put \"centos\"\n osType = \"generic\"\n\n # you can choose from \"neologd\", \"all\", \"ipaddic\", \"user\", \"\"\n # \"ipadic\" and \"\" is equivalent\n dictType = \"\"\n\n mecab_wrapper = MecabWrapper(dictType=dictType, osType=osType)\n\n # tokenize sentence. Returned object is list of tuples\n tokenized_obj = mecab_wrapper.tokenize(sentence=sentence)\n assert isinstance(tokenized_obj, list)\n\n # Returned object is \"TokenizedSenetence\" class if you put return_list=False\n tokenized_obj = mecab_wrapper.tokenize(sentence=sentence, return_list=False)\n assert isinstance(tokenized_obj, TokenizedSenetence)\n\n # ========================================================\n # FILTERING\n # ========================================================\n # you can filter tokens by stopwords or POS conditions\n\n # stopword is list objetc\n stopwords = [u'テヘラン']\n assert isinstance(tokenized_obj, TokenizedSenetence)\n # returned object is \"FilteredObject\" class\n filtered_obj = mecab_wrapper.filter(\n parsed_sentence=tokenized_obj,\n stopwords=stopwords\n )\n assert isinstance(filtered_obj, FilteredObject)\n\n # pos condition is list of tuples\n # You can set POS condition \"ChaSen 品詞体系 (IPA品詞体系)\" of this page http://www.unixuser.org/~euske/doc/postag/#chasen\n pos_condition = [(u'名詞', u'固有名詞'), (u'動詞', u'自立')]\n filtered_obj = mecab_wrapper.filter(\n parsed_sentence=tokenized_obj,\n pos_condition=pos_condition\n )\n assert isinstance(filtered_obj, FilteredObject)\n\n\ndef advanced_example():\n # ========================================================\n # USE YOUE OWN DICTIONARY\n # with your own dictionary, you can force Mecab to make some word into one token\n # ========================================================\n # make your own \"user dictionary\" with CSV file\n # To know more about this file, see this page(sorry, Japanese only) https://mecab.googlecode.com/svn/trunk/mecab/doc/dic.html\n example_user_dict = \"userdict.csv\"\n osType=\"generic\"\n\n # set dictType='user' or dictType='all'\n # set pathUserDictCsv\n mecab_wrapper = MecabWrapper(\n dictType='user',\n osType=osType,\n pathUserDictCsv=example_user_dict\n )\n sentence = u'テヘラン(ペルシア語: تهران ; Tehrān Tehran.ogg 発音[ヘルプ/ファイル]/teɦˈrɔːn/、英語:Tehran)は、西アジア、イランの首都でありかつテヘラン州の州都。人口12,223,598人。都市圏人口は13,413,348人に達する。'\n tokenized_obj = mecab_wrapper.tokenize(sentence, return_list=False)\n assert isinstance(tokenized_obj, TokenizedSenetence)\n for token_obj in tokenized_obj.tokenized_objects:\n assert isinstance(token_obj, TokenizedResult)\n if token_obj.word_stem == u'ペルシア語':\n print(token_obj.word_stem)\n\nif __name__ == \"__main__\":\n basic_example()\n 
advanced_example()","sub_path":"examples/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"166037785","text":"from day12.zawodnik import Zawodnik\n\nz1 = Zawodnik(\"Lewy\", \"noga\")\n\nprint(z1.imie)\nprint(z1.dyscyplina)\n\nz1.ustaw_nr_koszulki(123)\n\nz1.wypisz_numer()\n\nz1.ustaw_zarobki(\"milion\")\n\nz1.wypisz_zarobki()\n\nz1._Zawodnik__zarobki = 200\nprint(z1._Zawodnik__zarobki)\n\n\nprint(z1.__dict__)\n","sub_path":"day12/sporty.py","file_name":"sporty.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"646324105","text":"## Bulk image resizer\n\n# This script simply resizes all the images in a folder to one-eigth their\n# original size. It's useful for shrinking large cell phone pictures down\n# to a size that's more manageable for model training.\n\n# Usage: place this script in a folder of images you want to shrink,\n# and then run it.\n\nimport numpy as np\nimport cv2\nimport os\n\ndir_path = os.getcwd()\n# dir_path = \"D:\\\\Uni_Stuttgart\\\\MA\\\\image5\"\n\nfor filename in os.listdir(dir_path):\n # If the images are not .JPG images, change the line below to match the image type.\n if filename.endswith(\".jpg\"):\n print(filename)\n image = cv2.imread(filename)\n height, width, _ = image.shape\n ratio1 = 240 / height\n ratio2 = 320 / width\n ratio = min(ratio1, ratio2)\n print(height, width)\n resized = cv2.resize(image, None,fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)\n print(resized.shape)\n cv2.imwrite(filename,resized)","sub_path":"image_processing_tool/resizer.py","file_name":"resizer.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"40818239","text":"import datetime\nimport copy\nfrom gladier_xpcs.collections import SharedCollection\n\n\napsdataanalysis = SharedCollection('f3305466-c63d-4a54-8bfc-624402c970bc',\n '/eagle/APSDataAnalysis/XPCS/', name='Gladier XPCS')\nxpcs_data = SharedCollection('74defd5b-5f61-42fc-bcc4-834c9f376a4f',\n '/eagle/XPCS-DATA-DYS/', name='XPCS Data 8-ID APS')\nclutch = SharedCollection('fdc7e74a-fa78-11e8-9342-0e3d676669f4', '/', name='APS#Clutchsdmz')\ntheta_ep = SharedCollection('08925f04-569f-11e7-bef8-22000b9a448b', '/', name='alcf#dtn_theta')\napsdataprocessing = SharedCollection('98d26f35-e5d5-4edd-becf-a75520656c64', \n '/eagle/APSDataProcessing/aps8idi/', name='APS8IDI')\n\nclass BaseDeployment:\n source_collection: SharedCollection = None\n staging_collection: SharedCollection = None\n pub_collection: SharedCollection = None\n globus_endpoints = dict()\n funcx_endpoints = dict()\n flow_input = dict()\n\n def get_input(self):\n fi = self.flow_input.copy()\n fi['input'].update(self.funcx_endpoints)\n fi['input'].update(self.globus_endpoints)\n return fi\n\nclass Talc(BaseDeployment):\n\n globus_endpoints = {\n 'globus_endpoint_source': clutch.uuid,\n 'globus_endpoint_proc': theta_ep.uuid,\n }\n\n funcx_endpoints = {\n 'funcx_endpoint_non_compute': 'e449e8b8-e114-4659-99af-a7de06feb847',\n 'funcx_endpoint_compute': '4c676cea-8382-4d5d-bc63-d6342bdb00ca',\n }\n\n flow_input = {\n 'input': {\n 'staging_dir': apsdataanalysis.path / 'data_online',\n }\n }\n\n\nclass NickPolarisGPU(BaseDeployment):\n\n source_collection = xpcs_data\n staging_collection = apsdataanalysis\n pub_collection = xpcs_data\n\n globus_endpoints = {\n # Eagle -- XPCS Data 8-ID APS\n 'globus_endpoint_source': xpcs_data.uuid,\n 'globus_endpoint_proc': apsdataanalysis.uuid,\n }\n\n funcx_endpoints = {\n # Theta login\n 'funcx_endpoint_non_compute': '553e7b64-0480-473c-beef-be762ba979a9',\n # Containers\n 'funcx_endpoint_compute': '4a6f2b52-d392-4a57-ad77-ae6e86daf503',\n }\n\n flow_input = {\n 'input': {\n 'staging_dir': staging_collection.path / 'nick/xpcs_staging',\n }\n }\n\n\nclass NickPortalDeployment(BaseDeployment):\n\n def get_input(self):\n \"\"\"Separate portal runs by current datetime/second. 
This prevents run collisions\n of mulitple people running flows at the same time\"\"\"\n now = datetime.datetime.now()\n now = now - datetime.timedelta(microseconds=now.microsecond)\n now = now.isoformat().replace(':', '')\n\n finput = copy.deepcopy(super().get_input())\n finput['input']['staging_dir'] = finput['input']['staging_dir'].format(now=now)\n return finput\n\n flow_input = {\n 'input': {\n 'staging_dir': '/projects/APSDataAnalysis/XPCS/portal/{now}/',\n 'corr_loc': '/eagle/APSDataAnalysis/XPCS/xpcs-eigen/build/corr',\n # We don't have a way to store authorization data within a database yet.\n # FuncX ids on the portal need to be specified manually\n 'apply_qmap_funcx_id': '4d15c42d-a982-46ed-a548-497ac5977b70',\n 'eigen_corr_funcx_id': 'df859253-1113-4cbc-820c-8cf4afbf5764',\n 'gather_xpcs_metadata_funcx_id': '348e7fe6-7d64-4ccf-84b0-502294a087e9',\n 'make_corr_plots_funcx_id': 'dba85394-eae5-4651-827e-1cf03f536a75',\n 'publish_gather_metadata_funcx_id': '9a36d48b-b072-4e7d-a2dc-8f4a31ef9b45',\n 'publish_preparation_funcx_id': '4b39dbd5-1954-4923-89e3-9abbb39c0375',\n 'warm_nodes_funcx_id': 'f369eb60-9a4c-49cb-a078-abb1a81a7c66'\n }\n }\n\nclass HannahTheta(BaseDeployment):\n\n source_collection = xpcs_data\n staging_collection = apsdataanalysis\n pub_collection = xpcs_data\n\n globus_endpoints = {\n # Eagle -- XPCS Data 8-ID APS\n 'globus_endpoint_source': xpcs_data.uuid,\n 'globus_endpoint_proc': apsdataanalysis.uuid,\n }\n\n flow_input = {\n 'input': {\n 'staging_dir': staging_collection.path / 'hparraga/xpcs_staging',\n }\n }\n\n funcx_endpoints = {\n 'funcx_endpoint_non_compute': 'e3e1aef6-0a6f-4ef1-b9c6-a14b0efb1dfa',\n 'funcx_endpoint_compute': '3d9fde8a-1dfa-4ce7-93ab-5d524a59a4f6',\n }\n\n\nclass HannahPolaris(BaseDeployment):\n \n source_collection = xpcs_data\n staging_collection = apsdataanalysis\n pub_collection = xpcs_data\n\n globus_endpoints = {\n # Eagle -- XPCS Data 8-ID APS\n 'globus_endpoint_source': xpcs_data.uuid,\n 'globus_endpoint_proc': apsdataanalysis.uuid, \n }\n\n funcx_endpoints = {\n 'funcx_endpoint_non_compute': 'e3e1aef6-0a6f-4ef1-b9c6-a14b0efb1dfa',\n 'funcx_endpoint_compute': '0676a1f2-b92f-41f7-8e4f-6cc93eb6f929',\n }\n\n flow_input = {\n 'input': {\n 'staging_dir': staging_collection.path / 'hparraga/xpcs_staging',\n }\n }\n\n\nclass RyanPolaris(BaseDeployment):\n\n globus_endpoints = {\n 'globus_endpoint_source': 'e55b4eab-6d04-11e5-ba46-22000b92c6ec',\n 'globus_endpoint_proc': theta_ep.uuid,\n }\n\n funcx_endpoints = {\n 'funcx_endpoint_non_compute': '6c4323f4-a062-4551-a883-146a352a43f5',\n 'funcx_endpoint_compute': 'dc2a0cdb-2aee-44f7-a422-c4e28d9f7617',\n }\n\n flow_input = {\n 'input': {\n 'staging_dir': '/eagle/APSDataAnalysis/rchard/xpcs/',\n 'corr_loc': '/eagle/APSDataAnalysis/XPCS/xpcs-eigen/build/corr',\n }\n }\n\n\nclass APS8IDIPolaris(BaseDeployment):\n\n source_collection = xpcs_data\n staging_collection = apsdataprocessing\n pub_collection = xpcs_data\n\n globus_endpoints = {\n # Eagle -- XPCS Data 8-ID APS\n 'globus_endpoint_source': xpcs_data.uuid,\n 'globus_endpoint_proc': apsdataprocessing.uuid,\n }\n\n funcx_endpoints = {\n 'funcx_endpoint_non_compute': 'f8f4692a-0ab7-40d0-b256-ba5b82b5e2ec',\n 'funcx_endpoint_compute': 'f8f4692a-0ab7-40d0-b256-ba5b82b5e2ec',\n }\n\n flow_input = {\n 'input': {\n 'staging_dir': staging_collection.path / 'xpcs_staging',\n }\n }\n\n\nclass RafPolaris(BaseDeployment):\n\n globus_endpoints = {\n 'globus_endpoint_source': 'e55b4eab-6d04-11e5-ba46-22000b92c6ec',\n 'globus_endpoint_proc': 
theta_ep.uuid,\n }\n\n funcx_endpoints = {\n 'funcx_endpoint_non_compute': 'e449e8b8-e114-4659-99af-a7de06feb847',\n 'funcx_endpoint_compute': 'a93b6438-6ff7-422e-a1a2-9a4c6d9c1ea5',\n }\n\n flow_input = {\n 'input': {\n 'staging_dir': '/eagle/APSDataAnalysis/XPCS/raf/xpcs/',\n }\n }\n\ndeployment_map = {\n 'talc-prod': Talc(),\n 'raf-polaris': RafPolaris(),\n 'hannah-theta': HannahTheta(),\n 'hannah-polaris': HannahPolaris(),\n 'ryan-polaris': RyanPolaris(),\n 'nick-polaris-gpu': NickPolarisGPU(),\n 'aps8idi-polaris': APS8IDIPolaris(),\n}\n","sub_path":"gladier_xpcs/deployments.py","file_name":"deployments.py","file_ext":"py","file_size_in_byte":7025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
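How BaseDeployment.get_input() composes the flow input, traced on the HannahTheta deployment defined above (the values are the ones in this file):

# dep = HannahTheta()
# fi = dep.get_input()
# fi['input'] now contains:
#   'staging_dir'                 -> .../hparraga/xpcs_staging
#   'funcx_endpoint_non_compute' / 'funcx_endpoint_compute'
#   'globus_endpoint_source'     / 'globus_endpoint_proc'
# i.e. flow_input is copied first, then both endpoint dicts are merged in.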
+{"seq_id":"71485781","text":"\r\nimport sys\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nimport redis\r\nimport json\r\nimport msgpack\r\nfrom common_tools.redis_support_py3.construct_data_handlers_py3 import Generate_Handlers\r\nfrom common_tools.system_error_log_py3 import System_Error_Logging\r\nfrom common_tools.Pattern_tools_py3.builders.common_directors_py3 import construct_all_handlers\r\n\r\n\r\nfrom common_tools.Pattern_tools_py3.factories.get_site_data_py3 import get_site_data\r\n#\r\n# DB_CONNECTIONS hash key for store data base file locations\r\n#\r\n#\r\n#\r\n\r\n\r\nclass Construct_RPC_Server(object):\r\n\r\n def __init__(self,rpc_queue_handle ):\r\n self.rpc_queue_handle = rpc_queue_handle\r\n self.rpc_queue_handle.register_call_back( \"load\",self.load_file)\r\n self.rpc_queue_handle.register_call_back( \"save\", self.save_file)\r\n self.rpc_queue_handle.register_call_back( \"file_exists\",self.file_exists)\r\n self.rpc_queue_handle.register_call_back( \"delete_file\", self.delete_file)\r\n self.rpc_queue_handle.register_call_back( \"file_directory\", self.file_directory)\r\n self.rpc_queue_handle.register_call_back(\"make_dir\",self.mkdir)\r\n self.rpc_queue_handle.add_time_out_function(self.process_null_msg) \r\n self.rpc_queue_handle.start()\r\n\r\n\r\n def process_null_msg( self ): \r\n return \r\n #print(\"null message\") \r\n \r\n \r\n def load_file(self,input_message):\r\n try:\r\n print(\"load_file\")\r\n path = \"/files/\"+input_message[\"path\"]+\"/\"+input_message[\"file_name\"]\r\n f = open(path, 'r')\r\n data = f.read()\r\n f.close()\r\n return [True,data]\r\n except:\r\n return [False,data]\r\n \r\n def save_file(self,input_message):\r\n try:\r\n print(\"save_file\")\r\n path = \"/files/\"+input_message[\"path\"]+\"/\"+input_message[\"file_name\"]\r\n f = open(path, 'w')\r\n data = input_message[\"data\"]\r\n f.write(data)\r\n f.close()\r\n return [True,None]\r\n except:\r\n return [False,None]\r\n \r\n def file_exists(self,input_message):\r\n try:\r\n print(\"file_exits\")\r\n path = \"/files/\"+input_message[\"path\"]+\"/\"+input_message[\"file_name\"]\r\n return [True,isfile(path)]\r\n except:\r\n return [False,None]\r\n \r\n def file_directory(self,input_message):\r\n try:\r\n print(\"file_directory\")\r\n path = \"/files/\"+input_message[\"path\"]\r\n return [True,listdir(path)]\r\n except:\r\n return [False,None] \r\n\r\n def delete_file(self, input_message):\r\n try:\r\n print(\"delete_file\")\r\n path = \"/files/\"+input_message[\"path\"]+\"/\"+input_message[\"file_name\"]\r\n os.remove(path)\r\n return [True,None]\r\n except:\r\n return [False,None]\r\n \r\n \r\n def mkdir(self,input_message):\r\n try:\r\n print(\"mkdir\")\r\n path = \"/files/\"+input_message[\"path\"]\r\n os.makedirs(path)\r\n return [True,None]\r\n except:\r\n return [False,None]\r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n \r\n\r\nif __name__ == \"__main__\":\r\n \r\n import datetime\r\n import time\r\n import string\r\n import urllib.request\r\n import math\r\n import redis\r\n \r\n import json\r\n\r\n import os\r\n import copy\r\n \r\n from common_tools.redis_support_py3.graph_query_support_py3 import Query_Support\r\n import datetime\r\n import msgpack\r\n \r\n\r\n \r\n\r\n\r\n site_data = get_site_data()\r\n \r\n #\r\n # Setup handle\r\n # open data stores instance\r\n \r\n qs = Query_Support( site_data )\r\n container_name = os.getenv(\"CONTAINER_NAME\")\r\n \r\n #\r\n # error logging is only needed once\r\n # for reboot message\r\n #\r\n 
error_logging = System_Error_Logging(qs,container_name,site_data) \r\n\r\n \r\n \r\n search_list = [\"FILE_SERVER\",\"FILE_SERVER\"]\r\n data_structures = construct_all_handlers(site_data,qs,search_list,field_list=[\"FILE_SERVER_RPC_SERVER\"])\r\n rpc_queue = data_structures[\"FILE_SERVER_RPC_SERVER\"]\r\n Construct_RPC_Server(rpc_queue )\r\n \r\n\r\n \r\n","sub_path":"working/python_containers/file_server/file_server_py3.py","file_name":"file_server_py3.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"583317519","text":"#! /usr/bin/env python\n\n\ndef main():\n import sys, os\n if hasattr(sys, \"pypy_version_info\"):\n v = \"pypy\"\n else:\n v = \"py%s%s\" % (sys.version_info[:2])\n\n os.execvp(\"tox\", [\"tox\", \"-e\", v])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":".travis-runtox.py","file_name":".travis-runtox.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"452773020","text":"import os\nimport sys\n\nclass node:\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n def insert(self, data):\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = node(data)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = node(data)\n else:\n self.right.insert(data)\n else:\n self.data = data\n\n def printTree(self):\n if self.left:\n self.left.printTree()\n print(self.data, end=\" \")\n if self.right:\n self.right.printTree()\n\n def inorderTraversal(self, root):\n # Left -> Root -> Right\n res = []\n if root:\n res = self.inorderTraversal(root.left)\n res.append(root.data)\n res = res + self.inorderTraversal(root.right)\n return res\n\n def preorderTraversal(self, root):\n # Root -> Left -> right\n res = []\n if root:\n res.append(root.data)\n res = res + self.preorderTraversal(root.left)\n res = res + self.preorderTraversal(root.right)\n return res\n\n def postorderTraversal(self,root):\n # Left -> Right -> Root\n res = []\n if root:\n res = self.postorderTraversal(root.left)\n res = res + self.postorderTraversal(root.right)\n res.append(root.data)\n return res\n\n def find_deepest_right(self, root):\n if root:\n if root.right is None:\n return root.data\n else:\n self.find_deepest_right(self, root.right)\n else:\n return None\n return 0\n\n def delete(self, root, data):\n if root is None:\n return root\n if data < root.data:\n root.left = self.delete(root.left, data)\n elif data > root.data:\n root.right = self.delete(root.right, data)\n else:\n # if root node to be deleted\n if root.left is None:\n temp = root.right\n root = None\n return temp\n elif root.right is None:\n temp = root.left\n root = None\n return temp\n temp = self.minValueNode()\n\n root.data = temp.data\n\n root.right = self.delete(root.right, temp.data)\n return root\n\n def minValueNode(node):\n current = node\n while current.left is not None:\n current = current.left\n return current\n\nroot = node(10)\nroot.printTree()\nprint(\"\\n\")\nroot.insert(5)\nroot.insert(500)\nroot.insert(-1)\nroot.printTree()\nprint(\"\\n\")\nroot.insert(100)\nroot.printTree()\nprint(\"\\n\")\nprint(\"\\nInorder Traversal \")\nprint(root.inorderTraversal(root))\nprint(\"\\nPre_order Traversal\")\nprint(root.preorderTraversal(root))\nprint(\"\\nPost_order Traversal\")\nprint(root.postorderTraversal(root))\nroot.delete(root,5)\nprint(root.postorderTraversal(root))\nroot.insert(-12)\nroot.printTree()\nprint(\"\\n\")\nroot.insert(5)\nroot.insert(500)\nroot.printTree()\nprint(\"\\n\")\nroot.delete(root,500)\nroot.printTree()","sub_path":"Gold_Badge/Binary_Tree.py","file_name":"Binary_Tree.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"180298822","text":"import json\nimport time\nimport pytz\nimport logging\nimport tzlocal\nimport calendar\nfrom threading import Timer\nimport collections\nimport numpy as np\nimport pandas as pd\nimport elasticsearch\nimport dateutil.parser\n\nimport traceback\n\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\n\nfrom elastic_helper import es_helper \nfrom flask import Flask, jsonify, request\nfrom flask_restplus import Api, Resource, fields\n\n\nlogger=logging.getLogger()\nlogger.info(\"***>\"*100)\n\ndef config(api,conn,es,redis,token_required):\n\n @api.route('/api/v1/biac/kpi_model/')\n @api.doc(description=\"Get kpi entity model.\",params={'token': 'A valid token'})\n\n class biacKPIEntityModel(Resource): \n @token_required()\n @api.doc(description=\"Get kpi entity model.\",params={'token': 'A valid token'})\n def get(self, kpi, user=None):\n logger.info(\"biac - get kpi\"+kpi+\" model\")\n logger.info(user)\n\n kpi_model = {}\n if kpi == '304': \n kpi_model = determine_model_304(user)\n elif kpi == '600': \n kpi_model = determine_model_600(user)\n else:\n kpi_model = retrieve_kpi_entities_model(es, user['privileges'], kpi=kpi)\n return {'error':\"\",'status':'ok', 'data': json.dumps(kpi_model)}\n \n @api.route('/api/v1/biac/kpi600_monthly///')\n @api.doc(description=\"Get kpi600 monthly.\",params={'token': 'A valid token'})\n\n class biacKPI600Monthly(Resource): \n @token_required()\n @api.doc(description=\"Get kpi600 monthly record.\",params={'token': 'A valid token'})\n def get(self, lot, tec, date, user=None):\n logger.info(\"biac - get kpi600 monthly\")\n\n month = dateutil.parser.parse(date)\n \n return {'error':\"\",'status':'ok', 'data': get_kpi600_value(es, lot, tec, month)}\n \n @api.route('/api/v1/biac/kpi304_monthly///')\n @api.doc(description=\"Get kpi304 monthly.\",params={'token': 'A valid token'})\n\n class biacKPI304Monthly(Resource): \n @token_required()\n @api.doc(description=\"Get kpi304 month records.\",params={'token': 'A valid token'})\n def get(self, lot, tec, date, user=None):\n logger.info(\"biac - get kpi304 monthly\")\n\n date = dateutil.parser.parse(date)\n \n return {'error':\"\",'status':'ok', 'data': get_kpi304_values(es, lot, tec, date)}\n\n def post(self, lot, tec, date, user=None):\n logger.info(\"biac - get kpi304 monthly\")\n try:\n date = dateutil.parser.parse(date)\n update_kib_kpi304(es, lot, tec, date)\n \n return {'error':\"\",'status':'ok'}\n except Exception as e:\n logger.error(e)\n return {'error':str(e),'status':'ko'}\n \n post_kpi104_monthly = api.model('post_kpi104_monthly_model', {\n 'last_update_time': fields.Date(description=\"the last update time\", required=True),\n })\n\n @api.route('/api/v1/biac/kpi104_monthly')\n @api.doc(description=\"Post kpi104 monthly.\",params={'token': 'A valid token'})\n\n class biacKPI104Monthly(Resource): \n @token_required()\n @api.doc(description=\"Post biac kpi 104 monthy.\",params={'token': 'A valid token'})\n @api.expect(post_kpi104_monthly)\n def post(self, user=None):\n logger.info(\"biac - post kpi 104 monthly\")\n req= json.loads(request.data.decode(\"utf-8\")) \n last_update_time = dateutil.parser.parse(req['last_update_time'])\n\n update_kpi104_monthly(es, last_update_time)\n\n return {'error':\"\",'status':'ok'}\n\n post_kpi101_monthly = api.model('post_kpi101_monthly_model', {\n 'month_to_update': fields.Date(description=\"the month to update\", required=True),\n 'number_of_call_1': fields.String(description=\"number_of_call_1\", 
required=False),\n        'number_of_call_2': fields.String(description=\"number_of_call_2\", required=False),\n        'number_of_call_3': fields.String(description=\"number_of_call_3\", required=False),\n    })\n\n    @api.route('/api/v1/biac/kpi101_monthly')\n    @api.doc(description=\"Post kpi101 monthly.\",params={'token': 'A valid token'})\n\n    class biacKPI101Monthly(Resource): \n        @token_required()\n        @api.doc(description=\"Post biac kpi 101 monthly.\",params={'token': 'A valid token'})\n        @api.expect(post_kpi101_monthly)\n        def post(self, user=None):\n            logger.info(\"biac - post kpi 101 monthly\")\n            req= json.loads(request.data.decode(\"utf-8\")) \n            month_to_update = dateutil.parser.parse(req['month_to_update'])\n\n            number_of_call_1 = -1\n            number_of_call_2 = -1\n            number_of_call_3 = -1\n\n            if 'number_of_call_1' in req:\n                number_of_call_1 = req['number_of_call_1']\n            if 'number_of_call_2' in req:\n                number_of_call_2 = req['number_of_call_2']\n            if 'number_of_call_3' in req:\n                number_of_call_3 = req['number_of_call_3']\n\n            return {'error':\"\",'status':'ok', 'data': json.dumps(update_kpi101_monthly(es, month_to_update, number_of_call_1, number_of_call_2, number_of_call_3), cls=DateTimeEncoder)}\n\nclass DateTimeEncoder(json.JSONEncoder):\n    def default(self, o):\n        if isinstance(o, datetime):\n            return o.isoformat()\n        elif isinstance(o, np.integer): \n            # np.integer already covers np.int64 and the integer dtypes pandas uses;\n            # there is no pd.int64 type\n            return int(o)\n\n        return json.JSONEncoder.default(self, o)\n\n##########################################################\n# KPI101\n##########################################################\n\ndef update_kpi101_monthly(es, month, number_of_call_1=-1, number_of_call_2=-1, number_of_call_3=-1):\n    #logger.info('update_kpi101_monthly date: '+str(month))\n    \n    start_dt = mkFirstOfMonth(month)\n    end_dt = mkLastOfMonth(month)\n    \n\n    logger.info('**'*100)\n\n\n    logger.info(start_dt)\n    logger.info(end_dt)\n    \n    logger.info(number_of_call_1)\n    logger.info(number_of_call_2)\n    logger.info(number_of_call_3)\n    \n    df = es_helper.elastic_to_dataframe(es,index=\"biac_kpi101_call*\"\n                                    ,query='*'\n                                    ,start=start_dt\n                                    ,end=end_dt\n                                    ,timestampfield='datetime')\n\n    df_group = None\n    if len(df) > 0:\n        df_group = df.groupby('lot').agg({'_id':'count'})\n\n    obj = {\n        'not_timely_answer' : 0,\n        'not_timely_answer_1' : 0,\n        'not_timely_answer_2' : 0,\n        'not_timely_answer_3' : 0,\n        'number_of_call_1' : 0,\n        'number_of_call_2' : 0,\n        'number_of_call_3' : 0,\n        'percentage' : 0,\n        'percentage_1' : 0,\n        'percentage_2' : 0,\n        'percentage_3' : 0,\n    }\n\n    try:\n        obj['not_timely_answer_1']=df_group.loc[1, '_id']\n    except:\n        logger.info('no value for 1')\n    try:\n        obj['not_timely_answer_2']=df_group.loc[2, '_id']\n    except:\n        logger.info('no value for 2')\n    try:\n        obj['not_timely_answer_3']=df_group.loc[3, '_id']\n    except:\n        logger.info('no value for 3')\n\n    df_month = None\n    if number_of_call_1 == -1 or number_of_call_2 == -1 or number_of_call_3 == -1:\n        df_month = es_helper.elastic_to_dataframe(es,index=\"biac_kpi101_monthly*\"\n                                    ,query='*'\n                                    ,start=start_dt\n                                    ,end=end_dt\n                                    ,timestampfield='datetime')\n    \n\n    if number_of_call_1 == -1:\n        try:\n            obj['number_of_call_1'] = df_month.iloc[0]['number_of_call_1']\n        except:\n            obj['number_of_call_1'] = 0\n    else:\n        obj['number_of_call_1'] = number_of_call_1\n    \n    if number_of_call_2 == -1:\n        try:\n            obj['number_of_call_2'] = df_month.iloc[0]['number_of_call_2']\n        except:\n            obj['number_of_call_2'] = 0\n    else:\n        obj['number_of_call_2'] = number_of_call_2\n    \n    if number_of_call_3 
== -1:\n try:\n obj['number_of_call_3'] = df_month.iloc[0]['number_of_call_3']\n except:\n obj['number_of_call_3'] = 0\n else:\n obj['number_of_call_3'] = number_of_call_3\n \n\n\n obj['not_timely_answer'] = obj['not_timely_answer_1'] + obj['not_timely_answer_2'] + obj['not_timely_answer_3']\n obj['number_of_call'] = obj['number_of_call_1'] + obj['number_of_call_2'] + obj['number_of_call_3']\n\n if obj['number_of_call'] != 0:\n obj['percentage'] = round(((obj['number_of_call'] - obj['not_timely_answer']) / obj['number_of_call'])*100, 2)\n if obj['number_of_call_1'] != 0:\n obj['percentage_1'] = round(((obj['number_of_call_1'] - obj['not_timely_answer_1']) / obj['number_of_call_1'])*100, 2)\n if obj['number_of_call_2'] != 0:\n obj['percentage_2'] = round(((obj['number_of_call_2'] - obj['not_timely_answer_2']) / obj['number_of_call_2'])*100, 2)\n if obj['number_of_call_3'] != 0:\n obj['percentage_3'] = round(((obj['number_of_call_3'] - obj['not_timely_answer_3']) / obj['number_of_call_3'])*100, 2)\n \n local_timezone = tzlocal.get_localzone()\n \n obj['datetime'] = local_timezone.localize(start_dt)\n _id = int(obj['datetime'].timestamp())*1000\n \n res = es.index(index=\"biac_kpi101_monthly\", doc_type='doc', id=_id, body=json.dumps(obj, cls=DateTimeEncoder))\n \n logger.info(res)\n \n return obj\n\n##########################################################\n# KPI304\n##########################################################\n\ndef get_kpi304_values(es, lot, tec, date):\n query = 'lot:'+lot\n if lot == '2':\n query+=' AND tec:'+tec\n \n print(query)\n containertimezone=pytz.timezone(tzlocal.get_localzone().zone)\n \n start_dt = containertimezone.localize(datetime(date.year, date.month, 1))\n end_dt = containertimezone.localize(datetime(date.year, date.month, calendar.monthrange(date.year, date.month)[1], 23, 59, 59))\n \n print(start_dt)\n print(end_dt)\n \n dataframe=es_helper.elastic_to_dataframe(es,index=\"biac_kpi304\"\n ,datecolumns=[\"@timestamp\"]\n ,query=query\n ,start=start_dt\n ,end=end_dt)\n \n if len(dataframe) == 0:\n print('dataframe empty we create in DB')\n default_df=pd.DataFrame(pd.date_range(start=start_dt, end=end_dt), columns=['_timestamp'])\n\n if lot == '1' or lot == '2': \n default_df['tech']=2\n default_df['tech1']=1\n default_df['tech2']=1\n else:\n default_df['tech']=1\n default_df['hoofd']=1\n\n default_df['dayofweek']=default_df['_timestamp'].dt.dayofweek\n default_df.loc[default_df['dayofweek']>=5, 'tech']=0\n default_df.loc[default_df['dayofweek']>=5, 'hoofd']=0\n default_df['total']=default_df['tech']+default_df['hoofd']\n\n default_df['tec']=tec\n default_df['lot']=lot\n\n default_df['_index']='biac_kpi304'\n default_df['_id']=default_df['lot']+'_'+default_df['tec']+'_'+default_df['_timestamp'].astype(str)\n del default_df['dayofweek']\n\n \n es_helper.dataframe_to_elastic(es, default_df)\n default_df['_timestamp']=default_df['_timestamp'].dt.date.astype(str)\n\n logger.info('query'*100)\n thr = Timer(5, update_kib_kpi304, (es, lot, tec, date))\n \n thr.start()\n\n\n return default_df.rename({'_timestamp': '@timestamp'}, axis='columns').to_json(orient='records')\n \n else:\n dataframe.sort_values('@timestamp', inplace=True)\n dataframe['@timestamp']=dataframe['@timestamp'].dt.date.astype(str)\n return dataframe.to_json(orient='records')\n\n\ndef update_kib_kpi304(es, lot, tec, date):\n containertimezone=pytz.timezone(tzlocal.get_localzone().zone)\n\n start_dt = containertimezone.localize(datetime(date.year, date.month, 1))\n end_dt = 
containertimezone.localize(datetime(date.year, date.month, calendar.monthrange(date.year, date.month)[1], 23, 59, 59))\n\n    query = 'lot:'+str(lot)+' AND tec:'+tec\n\n    logger.info('query'*100)\n    logger.info(query)\n    logger.info(start_dt)\n    logger.info(end_dt)\n    logger.info(query)\n    df = es_helper.elastic_to_dataframe(es, index='biac_kpi304',datecolumns=[\"@timestamp\"]\\\n                                        , query=query, start=start_dt, end=end_dt)\n\n\n    if 'off' not in df:\n        df['off'] = 0\n    df['off'] = df['off'].fillna(0)\n    df['week_day'] = df['@timestamp'].dt.weekday\n    logger.info(df.shape)\n    df.head()\n\n    new_arr=[]\n    for index, row in df.iterrows():\n        flag_off = False\n\n        if row['week_day'] == 5 or row['week_day'] == 6 or int(row['off']) == 1:\n            flag_off = True\n\n        type_list = ['hoofd', 'tech1', 'tech2']\n        if 'tech1' not in row or row['tech1'] != row['tech1']:\n            type_list = ['hoofd', 'tech']\n\n        for i in type_list:\n            obj = {\n                'type': i,\n                'lot': row['lot'],\n                'kpi304_technic': row['tec'],\n                '@timestamp': row['@timestamp'],\n            }\n\n            if flag_off:\n                obj['value'] = -1\n            else:\n                obj['value'] = row[i]\n\n            obj['_id']= 'lot'+str(row['lot'])+'_'+row['tec']+'_'+i+'_'+str(int(obj['@timestamp'].timestamp()*1000))\n\n            if obj['type'] == 'hoofd':\n                obj['type_nl'] = 'Verantwoordelijke'\n            elif obj['type'] == 'tech':\n                obj['type_nl'] = 'Technieker'\n            elif obj['type'] == 'tech1':\n                obj['type_nl'] = 'Technieker 1'\n            elif obj['type'] == 'tech2':\n                obj['type_nl'] = 'Technieker 2'\n\n            new_arr.append(obj)\n\n\n    df_to_push=pd.DataFrame(new_arr)\n\n    df_to_push['_index'] = 'biac_kib_kpi304'\n    logger.info(df_to_push.shape)\n\n    es_helper.dataframe_to_elastic(es, df_to_push)\n\n##########################################################\n# KPI104\n##########################################################\n\ndef update_month_kpi104(es, month):\n    logger.info(month)\n    \n    local_timezone = tzlocal.get_localzone()\n\n\n    start_dt = month\n    end_dt = datetime(month.year, month.month, calendar.monthrange(month.year, month.month)[1])\n    \n    logger.info('-------------')\n    logger.info(start_dt)\n    logger.info(end_dt)\n    \n    df = es_helper.elastic_to_dataframe(es,index=\"biac_kpi104_check*\"\n                                    ,query='*'\n                                    ,start=start_dt\n                                    ,end=end_dt)\n\n\n\n    \n    logger.info('res len %d' % len(df))\n    max_dt = start_dt.astimezone(local_timezone)\n\n    obj_past = None\n    try:\n        obj_past = es.get(index=\"biac_kpi104_monthly\", doc_type='doc', id=start_dt.astimezone(local_timezone))['_source']\n    except elasticsearch.NotFoundError as error:\n        logger.error(\"Unable to retrieve past data\")\n        logger.error(error)\n\n        obj_past = {\n            '@timestamp' : start_dt.astimezone(local_timezone),\n            'last_update' : start_dt.astimezone(local_timezone),\n            'shift_number' : 0,\n            'shift_presence' : 0,\n            'percentage' : 100\n        }\n        es.index(index=\"biac_kpi104_monthly\", doc_type='doc', id=obj_past['@timestamp'], body=json.dumps(obj_past, cls=DateTimeEncoder))\n\n    logger.info(obj_past)\n\n    # defaults so the empty-dataframe branch still has defined values below\n    shift_number = 0\n    shift_presence = 0\n    percentage = 100\n\n    if len(df)==0:\n        logger.info('empty data frame')\n\n\n    else:\n        df['dt'] = pd.to_datetime(df['@timestamp'], unit='ms', utc=True)\n\n\n        shift_number = 0\n        shift_presence = 0\n        percentage = 100\n        \n        try:\n            shift_presence = df[df['value']]['value'].count()\n            max_dt = max(df[df['value']]['dt']).to_pydatetime().astimezone(local_timezone)\n            shift_number = max_dt.day * 6\n            percentage = 0\n        except: \n            logger.info('shift_presence to 0')\n\n        if obj_past['shift_number'] != 0:\n            shift_number = 6\n            shift_presence = 0\n            percentage = 0\n\n\n    logger.info(max_dt)\n    \n    \n    \n    logger.info('shift_number %d ' % shift_number)\n    
logger.info('shift_presence %d ' % shift_presence)\n \n obj = {\n '@timestamp' : start_dt.astimezone(local_timezone),\n 'last_update' : max_dt,\n 'shift_number' : shift_number,\n 'shift_presence' : shift_presence,\n 'percentage' : percentage\n }\n \n if shift_number != 0:\n obj['percentage'] = round((shift_presence*100)/shift_number, 1)\n \n \n logger.info(json.dumps(obj, cls=DateTimeEncoder))\n \n res = es.index(index=\"biac_kpi104_monthly\", doc_type='doc', id=obj['@timestamp'], body=json.dumps(obj, cls=DateTimeEncoder))\n logger.info(res)\n\ndef update_kpi104_monthly(es, date):\n logger.info('update_kpi104_monthly date: '+str(date))\n \n start = date - timedelta(days=date.weekday())\n \n if start.month != date.month:\n month_1 = datetime(start.year, start.month, 1)\n logger.info('update_month -> month_1: %s' % month_1)\n update_month_kpi104(es, month_1)\n \n month_2 = datetime(date.year, date.month, 1)\n logger.info('update_month -> month_2: %s' % month_2)\n update_month_kpi104(es, month_2)\n else:\n month = datetime(date.year, date.month, 1)\n logger.info('update_month -> month: %s' % month)\n update_month_kpi104(es, month)\n\n##########################################################\n# KPI600\n##########################################################\n\ndef retrieve_kpi_entities_model(es, privileges, kpi='600'):\n entities = []\n entitiesHT = {}\n\n res=es.search(index=\"biac_entity\",body={}, size=1000) \n for rec in res[\"hits\"][\"hits\"]:\n entities.append(rec[\"_source\"])\n entitiesHT[rec[\"_source\"][\"key\"]]=rec[\"_source\"]\n\n return getTechnicsKPIByPriv(entities, privileges, kpi=kpi)\n\ndef getTechnicsKPIByPriv(entities, privileges = [], kpi='600'):\n\n if type(privileges) == str:\n privileges = [privileges]\n logger.info(\"Get Entities per privileges.[\"+ \",\".join(privileges)+\"]\")\n \n ret_technics = {}\n \n \n\n for priv in privileges:\n for rec in entities:\n if 'kpi'+kpi+'_privileges' in rec: \n for rec_priv in rec['kpi'+kpi+'_privileges']:\n if priv == 'admin' or rec_priv == priv:\n if 'kpi'+kpi+'_technics' in rec:\n if rec['lot'] not in ret_technics:\n ret_technics[rec['lot']] = []\n \n ret_technics[rec['lot']] += rec['kpi'+kpi+'_technics']\n \n elif 'privileges' in rec:\n for rec_priv in rec['privileges']:\n\n\n if priv == 'admin' or rec_priv == priv:\n if 'kpi'+kpi+'_technics' in rec:\n\n if rec['lot'] not in ret_technics:\n ret_technics[rec['lot']] = []\n \n ret_technics[rec['lot']] += rec['kpi'+kpi+'_technics']\n\n \n for i in ret_technics:\n ret_technics[i] = list(set(ret_technics[i]))\n\n\n logger.info(\"RESULTS_\"*100)\n logger.info(ret_technics)\n return ret_technics\n\ndef put_default_values_kpi600_monthly(es, entities, month):\n entities_model = determine_model_600()\n arr = []\n\n for i in entities_model:\n obj = {\n 'lot' : i\n }\n\n for j in entities_model[i]:\n obj['kpi600_technic'] = j\n\n arr.append(obj.copy())\n\n df_kpi600 = pd.DataFrame(arr)\n df_kpi600\n\n start_dt = mkFirstOfMonth(month)\n local_timezone = tzlocal.get_localzone()\n\n start_dt = local_timezone.localize(start_dt)\n\n df_kpi600['@timestamp'] = start_dt\n df_kpi600['kpi601'] = False\n df_kpi600['kpi602'] = False\n df_kpi600['kpi603'] = False\n df_kpi600['cancel_by_customer'] = False\n df_kpi600['_id'] = df_kpi600.apply(lambda row: str(row['lot'])+'_'+\n row['kpi600_technic'].replace('/','').replace(' ','').lower()+'_'+\n str(int(row['@timestamp'].timestamp()*1000)), axis=1)\n \n bulkbody=''\n for index, row in df_kpi600.iterrows():\n action = {}\n action[\"index\"] = 
{\"_index\": 'biac_kpi600_monthly',\n \"_type\": \"doc\", \"_id\": row['_id']}\n\n try:\n res=es.get(index='biac_kpi600_monthly',doc_type=\"doc\",id= row['_id'])\n logger.info(\"Record \"+row['_id']+ \" found. Continuing...\")\n continue\n except:\n logger.info(\"Record \"+row['_id']+ \" not found. Creating it.... \") \n\n obj = {}\n\n for j in df_kpi600.columns:\n obj[j] = row[j]\n \n if '_id' in obj:\n del obj['_id']\n\n bulkbody += json.dumps(action)+\"\\r\\n\"\n bulkbody += json.dumps(obj, cls=DateTimeEncoder) + \"\\r\\n\"\n\n bulkbody\n\n bulkres = es.bulk(bulkbody, request_timeout=30)\n\ndef get_kpi600_value(es, lot, kpi600_technic, month):\n start_dt = mkFirstOfMonth(month)\n local_timezone = tzlocal.get_localzone()\n start_dt = local_timezone.localize(start_dt)\n \n es_id = (str(lot)+'_'+kpi600_technic+'_'+str(int(start_dt.timestamp()*1000))).lower()\n \n print(es_id)\n \n entities = []\n\n res=es.search(index=\"biac_entity\",body={}, size=1000) \n for rec in res[\"hits\"][\"hits\"]:\n entities.append(rec[\"_source\"])\n\n ret = None\n try:\n res = es.get(index='biac_kpi600_monthly', doc_type='doc', id=es_id)\n ret = res['_source']\n\n logger.info('=='*20)\n logger.info(str(ret))\n\n local_timezone = tzlocal.get_localzone()\n \n except elasticsearch.NotFoundError:\n print('setting default current month')\n put_default_values_kpi600_monthly(es, entities, month)\n \n ret = {\n 'kpi600_technic': kpi600_technic,\n 'lot': lot,\n '@timestamp': start_dt.isoformat(),\n 'kpi601': True,\n 'kpi602': True,\n 'kpi603': True,\n 'cancel_by_customer': False\n }\n \n ret['_id'] = es_id\n\n \n next_month = add_months(start_dt, 1)\n next_month_dt = datetime(next_month.year, next_month.month, next_month.day)\n \n es_id = (str(lot)+'_'+kpi600_technic+'_'+str(int(next_month_dt.timestamp()*1000))).lower()\n \n try:\n res = es.get(index='biac_kpi600_monthly', doc_type='doc', id=es_id)\n except elasticsearch.NotFoundError:\n print('setting default next month')\n put_default_values_kpi600_monthly(es, entities, next_month_dt)\n \n print(next_month_dt)\n print(es_id)\n \n \n return ret \n\n\n\ndef mkDateTime(dateString,strFormat=\"%Y-%m-%d\"):\n # Expects \"YYYY-MM-DD\" string\n # returns a datetime object\n eSeconds = time.mktime(time.strptime(dateString,strFormat))\n return datetime.fromtimestamp(eSeconds)\n\ndef formatDate(dtDateTime,strFormat=\"%Y-%m-%d\"):\n # format a datetime object as YYYY-MM-DD string and return\n return dtDateTime.strftime(strFormat)\n\ndef mkFirstOfMonth2(dtDateTime):\n #what is the first day of the current month\n ddays = int(dtDateTime.strftime(\"%d\"))-1 #days to subtract to get to the 1st\n delta = timedelta(days= ddays) #create a delta datetime object\n return dtDateTime - delta #Subtract delta and return\n\ndef mkFirstOfMonth(dtDateTime):\n #what is the first day of the current month\n #format the year and month + 01 for the current datetime, then form it back\n #into a datetime object\n return mkDateTime(formatDate(dtDateTime,\"%Y-%m-01\"))\n\ndef mkLastOfMonth(dtDateTime):\n dYear = dtDateTime.strftime(\"%Y\") #get the year\n dMonth = str(int(dtDateTime.strftime(\"%m\"))%12+1)#get next month, watch rollover\n dDay = \"1\" #first day of next month\n nextMonth = mkDateTime(\"%s-%s-%s\"%(dYear,dMonth,dDay))#make a datetime obj for 1st of next month\n delta = timedelta(seconds=1) #create a delta of 1 second\n return nextMonth - delta #subtract from nextMonth and return\n\ndef add_months(sourcedate, months):\n\n month = sourcedate.month - 1 + months\n year = sourcedate.year + 
month // 12\n    month = month % 12 + 1\n    day = min(sourcedate.day, calendar.monthrange(year, month)[1])\n    return date(year, month, day)\n\n\ndef determine_model_304(user=None):\n\n    model = {}\n\n    if user is None or user.get('filters') is None or user.get('filters') == []:\n        model = {\n            1: [\"BACHEA\"],\n            2: [\"SANI\", \"ELEC\", \"FIRE\", \"HVAC\"],\n            3: [\"BACEXT\"],\n            4: [\"BACDNB\"]\n        }\n\n    else:\n        filters = user.get('filters')\n\n        if 'LOT1' in filters:\n            model[1] = ['BACHEA']\n\n        model[2] = []\n        if 'LOT2_BACFIR_GONDELS' in filters:\n            model[2].append('FIRE')\n        if 'LOT2_BACFIR_ACCESS' in filters:\n            model[2].append('FIRE')\n        if 'LOT2_BACFIR_FIRE' in filters:\n            model[2].append('FIRE')\n        if 'LOT2_BACSAN_SANI' in filters:\n            model[2].append('SANI')\n        if 'LOT2_BACSAN_HVACPA' in filters:\n            model[2].append('SANI')\n        if 'LOT2_BACELE' in filters:\n            model[2].append('ELEC')\n        if 'LOT2_BACHVA' in filters:\n            model[2].append('HVAC')\n\n        if 'LOT2_BACSAN' in filters:\n            model[2] = ['SANI']\n\n        if 'LOT2_BACFIR' in filters:\n            model[2] = ['FIRE']\n\n        if 'LOT2' in filters:\n            model[2] = [\"SANI\", \"ELEC\", \"FIRE\", \"HVAC\"]\n\n        model[2] = list(dict.fromkeys(model[2]))\n        if model[2] == []:\n            del model[2]\n\n        if 'LOT3' in filters:\n            model[3] = ['BACEXT']\n\n        if 'LOT4' in filters:\n            model[4] = ['BACDNB']\n\n    return model\n\n\ndef determine_model_600(user=None):\n\n    model = {}\n\n    if user is None or user.get('filters') is None or user.get('filters') == []:\n        model = {\n            1: ['HVAC'],\n            2: ['Gondels', 'Acces', 'Fire', 'Sanitaire', 'HVAC PA/TO', 'Elektriciteit', 'HVAC NT/PB/OT/CON'],\n            3: ['Elektriciteit', 'Fire', 'HVAC', 'Sanitaire/Acces'],\n            4: ['Elektriciteit'],\n        }\n    else:\n        filters = user.get('filters')\n\n        if 'LOT1' in filters:\n            model[1] = ['HVAC']\n\n        model[2] = []\n        if 'LOT2_BACFIR_GONDELS' in filters:\n            model[2].append('Gondels')\n        if 'LOT2_BACFIR_ACCESS' in filters:\n            model[2].append('Acces')\n        if 'LOT2_BACFIR_FIRE' in filters:\n            model[2].append('Fire')\n        if 'LOT2_BACSAN_SANI' in filters:\n            model[2].append('Sanitaire')\n        if 'LOT2_BACSAN_HVACPA' in filters:\n            model[2].append('HVAC PA/TO')\n        if 'LOT2_BACELE' in filters:\n            model[2].append('Elektriciteit')\n        if 'LOT2_BACHVA' in filters:\n            model[2].append('HVAC NT/PB/OT/CON')\n\n        if 'LOT2_BACSAN' in filters:\n            model[2] = ['Sanitaire', 'HVAC PA/TO']\n\n        if 'LOT2_BACFIR' in filters:\n            model[2] = ['Gondels', 'Acces', 'Fire']\n\n        if 'LOT2' in filters:\n            model[2] = ['Gondels', 'Acces', 'Fire', 'Sanitaire', 'HVAC PA/TO', 'Elektriciteit', 'HVAC NT/PB/OT/CON']\n\n        model[2] = list(dict.fromkeys(model[2]))\n        if model[2] == []:\n            del model[2]\n\n        if 'LOT3' in filters:\n            model[3] = ['Elektriciteit', 'Fire', 'HVAC', 'Sanitaire/Acces']\n\n            # The KPI600-HIDE flags only apply when LOT3 is selected; checking them\n            # inside this block also avoids a KeyError when model[3] was never set.\n            if \"KPI600-HIDE-LOT3HVAC\" in filters:\n                model[3].remove(\"HVAC\")\n            if \"KPI600-HIDE-LOT3ELEC\" in filters:\n                model[3].remove(\"Elektriciteit\")\n            if \"KPI600-HIDE-LOT3FIRE\" in filters:\n                model[3].remove(\"Fire\")\n            if \"KPI600-HIDE-LOT3SANI\" in filters:\n                model[3].remove(\"Sanitaire/Acces\")\n\n        if 'LOT4' in filters:\n            model[4] = ['Elektriciteit']\n\n    return model","sub_path":"sources/lib/ext_biac.py","file_name":"ext_biac.py","file_ext":"py","file_size_in_byte":29238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
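The ext_biac.py record above does its month arithmetic by hand (add_months, mkFirstOfMonth, mkLastOfMonth) rather than pulling in dateutil. A minimal standalone sketch of the same scheme — standard library only; the names mirror the snippet but this is an illustration, not the module itself — shows how the December rollover works out:

```python
import calendar
from datetime import date, datetime, timedelta

def add_months(sourcedate, months):
    # Same scheme as the snippet: 0-based month arithmetic, then clamp the day.
    month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
    month = month % 12 + 1
    day = min(sourcedate.day, calendar.monthrange(year, month)[1])
    return date(year, month, day)

def last_of_month(d):
    # First day of the next month, minus one second.
    nxt = add_months(date(d.year, d.month, 1), 1)
    return datetime(nxt.year, nxt.month, 1) - timedelta(seconds=1)

print(add_months(date(2019, 12, 31), 1))   # 2020-01-31
print(last_of_month(date(2019, 12, 15)))   # 2019-12-31 23:59:59
```

The day clamp via calendar.monthrange is what keeps Jan 31 plus one month from overflowing into March.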
+{"seq_id":"57612619","text":"from PyInquirer import prompt\nfrom setting import Settings\n\n\nclass YoutubePlaylist:\n def __init__(self, api):\n self.api = api\n\n settingsObj = Settings.getInstance()\n s = settingsObj.getSettings()\n if s.get(\"yt_id\"):\n self.id = s.get(\"yt_id\")\n else:\n self.id = self.getId()\n s[\"yt_id\"] = self.id\n settingsObj.setSettings(s)\n\n def getId(self):\n request = self.api.playlists().list(\n part=\"snippet,contentDetails\", maxResults=100, mine=True\n )\n response = request.execute()\n items = response[\"items\"]\n playlists = []\n for item in items:\n playlists.append({\"name\": item[\"snippet\"][\"title\"], \"value\": item[\"id\"]})\n\n id = prompt(\n {\n \"type\": \"list\",\n \"name\": \"Select Playlist\",\n \"message\": \"Select Playlist\",\n \"choices\": playlists,\n }\n )\n return id[\"Select Playlist\"]\n\n def getElements(self):\n print(\"Getting elements of playlist...\")\n\n items = self.getElementsPage()\n elements = []\n for item in items:\n elements.append(\n (item[\"contentDetails\"][\"videoId\"], item[\"snippet\"][\"title\"])\n )\n\n return elements\n\n def getElementsPage(self, page=None):\n\n request = self.api.playlistItems().list(\n part=\"contentDetails, snippet\", playlistId=self.id, pageToken=page\n )\n try:\n response = request.execute()\n except Exception as e:\n print(e)\n exit()\n\n items = response[\"items\"]\n page = response.get(\"nextPageToken\")\n if page:\n items += self.getElementsPage(page=page)\n\n return items\n","sub_path":"youtube/get_playlists.py","file_name":"get_playlists.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"149702002","text":"from globals import *\nimport http\nfrom response import Response\n\n\nclass SubdomainCDN:\n def __init__(self, request, socket):\n self.request = request\n self.socket = socket\n\n def handle(self):\n q = self.request.query # type: str\n try:\n args = extract_args(q)\n except ValueError:\n args = None\n\n perm = False\n if args and 'k' in args:\n perm = check_content_key(args['k'])\n\n # structure: eb/eb_img.png?k={key}\n if q.startswith('eb') and args:\n if not perm:\n raise http.ForbiddenError('Key invalid.')\n\n filepath = q.split('?')[0].replace('eb', 'eight_ball_im', 1)\n filename = filepath.split('/')[1]\n\n resp = Response().content_type(http.PNG).set_filename(filename).set_file(filepath)\n\n self.socket.send(resp.read())\n\n","sub_path":"old_files/cdn.py","file_name":"cdn.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"634428948","text":"\"\"\"Command line interfaces for bonus-ing Workers\"\"\"\n\nimport logging\n\nimport click\nimport csv\nimport json\n\nfrom amti import actions\nfrom amti import settings\nfrom amti import utils\n\n\nlogger = logging.getLogger(__name__)\n\n\n@click.command(\n context_settings={\n 'help_option_names': ['--help', '-h']\n })\n@click.option(\n '--file', '-f',\n type=click.Path(exists=True, file_okay=True, dir_okay=False),\n help=\"Path to file of WorkerIds to block.\")\n@click.option(\n '--live', '-l',\n is_flag=True,\n help='View the status of HITs from the live MTurk site.')\ndef bonus_workers(file, live):\n \"\"\"Send notification message to workers.\n\n Given a space seperated list of WorkerIds (IDS), or a path to\n a CSV of WorkerIds, send a notification to each worker. \n \"\"\"\n env = 'live' if live else 'sandbox'\n\n client = utils.mturk.get_mturk_client(env)\n\n data = utils.workers.read_data_from_csv(file)\n bonus_sum = sum([float(item[\"BonusAmount\"]) for item in data])\n num_workers = len(set([item[\"WorkerId\"] for item in data]))\n\n cost_approved = click.confirm(f'Approve cost (~{bonus_sum:.2f} USD) for {num_workers} unique WorkerIds and {len(data)} bonuses. Proceed?')\n if not cost_approved:\n logger.info(' The bonus cost was not approved. Aborting bonus send.')\n return\n \n for worker_dict in data:\n logger.info(f\"Sending bonus to workers: {worker_dict['WorkerId']}\")\n\n response = client.send_bonus(\n AssignmentId=worker_dict['AssignmentId'],\n BonusAmount=worker_dict['BonusAmount'],\n WorkerId=worker_dict['WorkerId'],\n Reason=worker_dict['Reason'],\n )\n\n logger.info('Finished sending bonuses.')","sub_path":"amti/clis/bonus.py","file_name":"bonus.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"108773389","text":"# Importing the required libraries\r\n\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport math\r\nimport ezdxf\r\nimport numpy as np\r\nfrom matplotlib.patches import Polygon\r\nfrom svglib.svglib import svg2rlg\r\nfrom reportlab.graphics import renderPM\r\nfrom math import sqrt, sin, cos, pi, asin\r\n\r\n\r\n\r\n\r\n# ---------------------------------------------------------------------------------------------------------------#\r\n\r\n# All of the required functions at one place\r\n\r\n\r\ndef sheet(length_sheet, width_sheet): #gives area of sheet\r\n vertices_sheet = [(0, 0, 0), (int(length_sheet), 0, 0), (int(length_sheet), (int(width_sheet)), 0), (0, (int(width_sheet)), 0)]\r\n sheet_area = 0\r\n for i in range(len(vertices_sheet)):\r\n if i <= (len(vertices_sheet) - 2):\r\n b = (vertices_sheet[i + 1][1] + vertices_sheet[i][1]) / 2\r\n e = vertices_sheet[i + 1][0] - vertices_sheet[i][0]\r\n sheet_area = sheet_area + (b * e)\r\n else:\r\n b = (vertices_sheet[i][1] + vertices_sheet[0][1]) / 2\r\n e = vertices_sheet[0][0] - vertices_sheet[i][0]\r\n sheet_area = sheet_area + (b * e)\r\n area_of_sheet = abs(sheet_area) # area of piece after calculation\r\n return length_sheet, width_sheet, area_of_sheet\r\n\r\n\r\ndef horizontal_checking(list_shape1):\r\n right_x1 = 0\r\n left_x1 = 0\r\n for x1 in range(len(list_shape1)): # Get the lowest y-coordinate of shape-1\r\n if x1 == 0: # Get the highest y-coordinate of shape-1\r\n right_x1 = list_shape1[x1][0]\r\n a = list_shape1[0 + int(x1)]\r\n left_x1 = list_shape1[x1][0]\r\n b = list_shape1[0 + int(x1)]\r\n else:\r\n if list_shape1[x1][0] == right_x1:\r\n if list_shape1[x1][1] < a[1]:\r\n a = list_shape1[0 + int(x1)]\r\n if list_shape1[x1][0] > right_x1:\r\n right_x1 = list_shape1[x1][0]\r\n a = list_shape1[0 + int(x1)]\r\n if list_shape1[x1][0] == left_x1:\r\n if list_shape1[x1][1] < b[1]:\r\n b = list_shape1[0 + int(x1)]\r\n if list_shape1[x1][0] < left_x1:\r\n left_x1 = list_shape1[x1][0]\r\n b = list_shape1[0 + int(x1)]\r\n return b, a\r\n\r\n\r\ndef clockwise_list(list_shape2):\r\n j = 0\r\n list_shape1 = []\r\n for i in range(len(list_shape2)): # Check => shape co-ordinates are clockwise or anti-clockwise\r\n if i != len(list_shape2) - 1:\r\n j = j + ((list_shape2[i + 1][0] - list_shape2[i][0]) * (list_shape2[i + 1][1] + list_shape2[i][1]))\r\n else:\r\n j = j + ((list_shape2[0][0] - list_shape2[i][0]) * (list_shape2[0][1] + list_shape2[i][1]))\r\n if j > 0: # If shape co-ordinates are anti-clockwise then arrange it in clockwise\r\n list_shape1 = list_shape2\r\n else:\r\n for i in range(len(list_shape2)):\r\n list_shape1.append(\r\n list_shape2[len(list_shape2) - 1 - i]) # list_shape1 = clockwise arrangement of co-ordinates\r\n return list_shape1\r\n\r\ndef area_at_the_left_of_given_piece(list_shape1, x, y):\r\n list = []\r\n list1 = [y]\r\n for i in range(len(list_shape1)): # Fatching co_ordinates between leftmost high and leftmost low co-ordinates\r\n if list_shape1[i] == x:\r\n while i <= len(list_shape1):\r\n list.append(list_shape1[i])\r\n if list_shape1[i] == y:\r\n break\r\n elif list_shape1[i] != y and i == len(list_shape1) - 1:\r\n for j in range(len(list_shape1)):\r\n if list_shape1 != y:\r\n list.append(list_shape1[j])\r\n if list_shape1[j] == y:\r\n break\r\n break\r\n i += 1\r\n for i in range(len(list) - 1):\r\n list1.append(list[i])\r\n return list1\r\n\r\ndef vertical_checking(list_shape1):\r\n high_y1 = 0\r\n low_y1 = 0\r\n for y1 in range(len(list_shape1)): # Get the 
lowest y-coordinate of shape-1\r\n if y1 == 0: # Get the highest y-coordinate of shape-1\r\n high_y1 = list_shape1[y1][1]\r\n a = list_shape1[0 + int(y1)]\r\n low_y1 = list_shape1[y1][1]\r\n b = list_shape1[0 + int(y1)]\r\n else:\r\n if list_shape1[y1][1] == high_y1:\r\n if list_shape1[y1][0] < a[0]:\r\n a = list_shape1[0 + int(y1)]\r\n if list_shape1[y1][1] > high_y1:\r\n high_y1 = list_shape1[y1][1]\r\n a = list_shape1[0 + int(y1)]\r\n if list_shape1[y1][1] == low_y1:\r\n if list_shape1[y1][0] < b[0]:\r\n b = list_shape1[0 + int(y1)]\r\n if list_shape1[y1][1] < low_y1:\r\n low_y1 = list_shape1[y1][1]\r\n b = list_shape1[0 + int(y1)]\r\n return b, a\r\n\r\ndef side(a, b, c):\r\n \"\"\" Returns a position of the point c relative to the line going through a and b\r\n Points a, b are expected to be different\r\n \"\"\"\r\n d = (c[1] - a[1]) * (b[0] - a[0]) - (b[1] - a[1]) * (c[0] - a[0])\r\n return 1 if d > 0 else (-1 if d < 0 else 0)\r\n\r\n\r\ndef is_point_in_closed_segment(a, b, c): # Returns True if c is inside closed segment, False otherwise. a, b, c are expected to be collinear\r\n\r\n if a[0] < b[0]:\r\n return a[0] <= c[0] and c[0] <= b[0]\r\n if b[0] < a[0]:\r\n return b[0] <= c[0] and c[0] <= a[0]\r\n\r\n if a[1] < b[1]:\r\n return a[1] <= c[1] and c[1] <= b[1]\r\n if b[1] < a[1]:\r\n return b[1] <= c[1] and c[1] <= a[1]\r\n\r\n return a[0] == c[0] and a[1] == c[1]\r\n\r\n\r\ndef closed_segment_intersect(a, b, c, d): # Verifies if closed segments a, b, c, d do intersect.\r\n if a == b:\r\n return a == c or a == d\r\n if c == d:\r\n return c == a or c == b\r\n\r\n s1 = side(a, b, c)\r\n s2 = side(a, b, d)\r\n\r\n # All points are collinear\r\n if s1 == 0 and s2 == 0:\r\n return \\\r\n is_point_in_closed_segment(a, b, c) or is_point_in_closed_segment(a, b, d) or \\\r\n is_point_in_closed_segment(c, d, a) or is_point_in_closed_segment(c, d, b)\r\n\r\n # No touching and on the same side\r\n if s1 and s1 == s2:\r\n return False\r\n\r\n s1 = side(c, d, a)\r\n s2 = side(c, d, b)\r\n\r\n # No touching and on the same side\r\n if s1 and s1 == s2:\r\n return False\r\n return True\r\n\r\n\r\ndef is_point_in_closed_segment_vertical(a, b, c): # Returns True if c is inside closed segment, False otherwise. 
a, b, c are expected to be collinear\r\n if a[0] < b[0]:\r\n return a[0] < c[0] and c[0] < b[0]\r\n if b[0] < a[0]:\r\n return b[0] < c[0] and c[0] < a[0]\r\n\r\n if a[1] < b[1]:\r\n return a[1] < c[1] and c[1] < b[1]\r\n if b[1] < a[1]:\r\n return b[1] < c[1] and c[1] < a[1]\r\n\r\n return a[0] == c[0] and a[1] == c[1]\r\n\r\n\r\ndef minimum_x_coordinate(list_shape1): #\r\n for i in range(len(list_shape1)):\r\n if i == 0:\r\n min_x = list_shape1[0][0]\r\n else:\r\n if min_x >= list_shape1[i][0]:\r\n min_x = list_shape1[i][0]\r\n return min_x\r\n\r\n\r\ndef maximum_x_coordinate(list_1): #\r\n for i in range(len(list_1)):\r\n if i == 0:\r\n max_x = list_1[0][0]\r\n else:\r\n if max_x <= list_1[i][0]:\r\n max_x = list_1[i][0]\r\n return max_x\r\n\r\n\r\ndef minimum_y_coordinate(list_2): #\r\n for i in range(len(list_2)):\r\n if i == 0:\r\n min_y = list_2[0][1]\r\n else:\r\n if min_y >= list_2[i][1]:\r\n min_y = list_2[i][1]\r\n return min_y\r\n\r\n\r\ndef maximum_y_coordinate(list_2): #\r\n for i in range(len(list_2)):\r\n if i == 0:\r\n max_y = list_2[0][1]\r\n else:\r\n if max_y <= list_2[i][1]:\r\n max_y = list_2[i][1]\r\n return max_y\r\n\r\ndef intersection_of_shapes(list_shape1, list_shape2):\r\n if (list_shape1 == list_shape2):\r\n return True\r\n for y1 in range(len(list_shape1)): # Get the lowest y-coordinate of shape-1\r\n if y1 == 0: # Get the highest y-coordinate of shape-1\r\n high_y1 = list_shape1[y1][1]\r\n low_y1 = list_shape1[y1][1]\r\n else:\r\n if list_shape1[y1][1] > high_y1:\r\n high_y1 = list_shape1[y1][1]\r\n if list_shape1[y1][1] < low_y1:\r\n low_y1 = list_shape1[y1][1]\r\n\r\n for y2 in range(len(list_shape2)): # Get the lowest y-coordinate of shape-2\r\n if y2 == 0: # Get the highest y-coordinate of shape-2\r\n high_y2 = list_shape2[y2][1]\r\n low_y2 = list_shape2[y2][1]\r\n else:\r\n if list_shape2[y2][1] > high_y2:\r\n high_y2 = list_shape2[y2][1]\r\n if list_shape2[y2][1] < low_y2:\r\n low_y2 = list_shape2[y2][1]\r\n\r\n if low_y1 > high_y2 or low_y2 > high_y1: # Checks the condition for intersection\r\n return False\r\n\r\n for x1 in range(len(list_shape1)): # Get the leftmost x-coordinate of shape-1\r\n if x1 == 0: # Get the rightmost x-coordinate of shape-1\r\n right_x1 = list_shape1[x1][0]\r\n left_x1 = list_shape1[x1][0]\r\n else:\r\n if list_shape1[x1][0] > right_x1:\r\n right_x1 = list_shape1[x1][0]\r\n if list_shape1[x1][0] < left_x1:\r\n left_x1 = list_shape1[x1][0]\r\n\r\n for x2 in range(len(list_shape2)): # Get the leftmost x-coordinate of shape-2\r\n if x2 == 0: # Get the rightmost x-coordinate of shape-2\r\n right_x2 = list_shape2[x2][0]\r\n left_x2 = list_shape2[x2][0]\r\n else:\r\n if list_shape2[x2][0] > right_x2:\r\n right_x2 = list_shape2[x2][0]\r\n if list_shape2[x2][0] < left_x2:\r\n left_x2 = list_shape2[x2][0]\r\n\r\n if left_x1 > right_x2 or left_x2 > right_x1: # Checks the condition for intersection\r\n return False\r\n\r\n for e1 in range(len(list_shape1)): # Creates the edges from co-ordinates of shape-1\r\n if e1 == len(list_shape1) - 1:\r\n a = list_shape1[0 + int(e1)]\r\n b = list_shape1[0]\r\n else:\r\n a = list_shape1[0 + int(e1)]\r\n b = list_shape1[1 + int(e1)]\r\n for e2 in range(len(list_shape2)): # Creates the edges from co-ordinates of shape-2\r\n if e2 == len(list_shape2) - 1:\r\n c = list_shape2[0 + int(e2)]\r\n d = list_shape2[0]\r\n else:\r\n c = list_shape2[0 + int(e2)]\r\n d = list_shape2[1 + int(e2)]\r\n if (closed_segment_intersect(a, b, c, d) == True):\r\n return True\r\n break\r\n if (closed_segment_intersect(a, b, 
c, d) == True): # Checks the condition for intersection\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef arg_shapes(list2, length_sheet, width_sheet): # placing shapes on top right corner above the sheet\r\n list4 = []\r\n for i in range(len(list2)):\r\n list3 = []\r\n minimum_x = minimum_x_coordinate(list2[i])\r\n minimum_y = minimum_y_coordinate(list2[i])\r\n maximum_x = maximum_x_coordinate(list2[i])\r\n for j in range(len(list2[i])):\r\n a = (int(length_sheet) + (list2[i][j][0] - minimum_x)) - (maximum_x - minimum_x)\r\n b = list2[i][j][1] - minimum_y + int(width_sheet)\r\n c = a, b, 0\r\n list3.append(c)\r\n list4.append(list3)\r\n return list4\r\n\r\n\r\ndef image(img, contour_type): # processing the image file type and extracting coordinates from the image\r\n # convert to RGB\r\n scale_percent = 26.46 # percent of original size\r\n width = int(img.shape[1] * scale_percent / 100)\r\n height = int(img.shape[0] * scale_percent / 100)\r\n dim = (width, height)\r\n # resize image\r\n img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\r\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n image = cv2.flip(image,0)\r\n # convert to grayscale\r\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\r\n\r\n # create a binary thresholded image\r\n _, binary = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)\r\n list_of_contours = []\r\n list_of_hierarchy = []\r\n list_of_contours_final = []\r\n number_of_contour = []\r\n list_of_contours_not_used = []\r\n\r\n if contour_type == 1:\r\n # find the contours from the thresholded image\r\n contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n for i in range(len(hierarchy)):\r\n list_of_hierarchy = []\r\n for j in range(len(hierarchy[i])):\r\n a = hierarchy[i][j][0]\r\n b = hierarchy[i][j][1]\r\n c = hierarchy[i][j][2]\r\n d = hierarchy[i][j][3]\r\n e = a,b,c,d\r\n list_of_hierarchy.append(e)\r\n\r\n for i in range(len(contours)):\r\n list_used_to_find_contours = []\r\n for j in range(len(contours[i])):\r\n for k in range(len(contours[i][j])):\r\n a = contours[i][j][k][0]\r\n b = contours[i][j][k][1]\r\n c = a,b,0\r\n list_used_to_find_contours.append(c)\r\n if len(list_used_to_find_contours) != 1 :\r\n list_of_contours.append(list_used_to_find_contours)\r\n\r\n for i in range(len(list_of_hierarchy)) :\r\n for j in range(len(list_of_hierarchy)) :\r\n if i == list_of_hierarchy[j][2] :\r\n for k in range(len(list_of_hierarchy)) :\r\n if i == list_of_hierarchy[k][3] :\r\n n = list_of_hierarchy[i][3]\r\n if list_of_hierarchy[n][3] == -1:\r\n number_of_contour.append(n)\r\n if list_of_hierarchy[i][2] == -1 :\r\n n = list_of_hierarchy[i][3]\r\n number_of_contour.append(n)\r\n\r\n for i in range(len(list_of_contours)) :\r\n if (i in number_of_contour) == True :\r\n list_of_contours_final.append(list_of_contours[i])\r\n else :\r\n list_of_contours_not_used.append(list_of_contours[i])\r\n\r\n if len(list_of_contours_final) == 0 :\r\n index = 0\r\n for i in range(len(list_of_hierarchy)):\r\n if (list_of_hierarchy[i][2],list_of_hierarchy[i][3]) == (-1,-1):\r\n index = index + 1\r\n if index == len(list_of_hierarchy):\r\n list_of_contours_final = list_of_contours\r\n\r\n # font\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n for i in range(len(list_of_contours_final)):\r\n text = str(i)\r\n org = list_of_contours_final[i][0][0], list_of_contours_final[i][0][1] - 3\r\n image = cv2.putText(image, text, org, font, fontScale=0.5, color=(0, 0, 255), thickness=1)\r\n\r\n # draw all contours\r\n image = 
cv2.drawContours(image, contours, -1, (0, 255, 0), 1)\r\n\r\n # show the image with the drawn contours\r\n plt.imshow(image)\r\n plt.show()\r\n return list_of_contours_final\r\n\r\n elif contour_type == 2:\r\n contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE,\r\n cv2.CHAIN_APPROX_SIMPLE) # cv2.RETR_CCOMP cv2.RETR_TREE cv2.RETR_EXTERNAL\r\n\r\n for i in range(len(contours)):\r\n list_used_to_find_contours = []\r\n for j in range(len(contours[i])):\r\n for k in range(len(contours[i][j])):\r\n a = contours[i][j][k][0]\r\n b = contours[i][j][k][1]\r\n c = a, b, 0\r\n list_used_to_find_contours.append(c)\r\n if len(list_used_to_find_contours) != 1:\r\n list_of_contours_final.append(list_used_to_find_contours)\r\n\r\n # font\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n for i in range(len(list_of_contours_final)):\r\n text = str(i)\r\n org = list_of_contours_final[i][0][0], list_of_contours_final[i][0][1] - 3\r\n image = cv2.putText(image, text, org, font , fontScale = 0.5, color = (0, 0, 255), thickness = 1)\r\n\r\n # draw all contours\r\n image = cv2.drawContours(image, contours, -1, (0, 255, 0), 1)\r\n\r\n # show the image with the drawn contours\r\n plt.imshow(image)\r\n plt.show()\r\n return list_of_contours_final\r\n\r\n else :\r\n return print(\" You have entered an invalid choice \")\r\n\r\n\r\ndef nested_shapes_coordinates(new_vertices_other_shapes): # calculation for nesting algorithm\r\n left_shape = []\r\n right_shape = []\r\n new_vertices_other_shapes_clockwise = []\r\n\r\n for i in range(len(new_vertices_other_shapes)):\r\n new_vertices_other_shapes_clockwise.append(clockwise_list(new_vertices_other_shapes[i]))\r\n new_vertices_other_shapes = new_vertices_other_shapes_clockwise\r\n\r\n vertices_nested_again_shapes = []\r\n for i in range(len(new_vertices_other_shapes)):\r\n right_shape = new_vertices_other_shapes[i]\r\n max_y = maximum_y_coordinate(right_shape)\r\n min_y = minimum_y_coordinate(right_shape)\r\n min_x = minimum_x_coordinate(right_shape)\r\n max_x = maximum_x_coordinate(right_shape)\r\n left_side_coordinates = []\r\n for i in range(len(vertices_nested_again_shapes)):\r\n points_of_left_shape = []\r\n if right_shape == vertices_nested_again_shapes[i]:\r\n continue\r\n for j in range(len(vertices_nested_again_shapes[i])):\r\n if vertices_nested_again_shapes[i][j][0] <= min_x and vertices_nested_again_shapes[i][j][1] <= max_y and \\\r\n vertices_nested_again_shapes[i][j][1] >= min_y:\r\n points_of_left_shape.append(vertices_nested_again_shapes[i][j])\r\n if len(points_of_left_shape) != 0:\r\n left_side_coordinates.append(vertices_nested_again_shapes[i])\r\n\r\n if len(left_side_coordinates) == 0 or len(left_side_coordinates) != 0:\r\n var_1 = 0\r\n var_3 = (-1, -1, -1)\r\n var_6 = (-1, -1, -1)\r\n var_4 = 10000\r\n for i in range(len(vertices_nested_again_shapes)):\r\n for j in range(len(vertices_nested_again_shapes[i])):\r\n if vertices_nested_again_shapes[i][j][1] <= min_y and vertices_nested_again_shapes[i][j][0] <= min_x:\r\n var_2 = vertices_nested_again_shapes[i][j]\r\n if var_2[1] > var_1:\r\n var_1 = var_2[1]\r\n var_3 = var_2\r\n if vertices_nested_again_shapes[i][j][1] >= max_y and vertices_nested_again_shapes[i][j][0] <= min_x:\r\n var_5 = vertices_nested_again_shapes[i][j]\r\n if var_5[1] < var_4:\r\n var_4 = var_5[1]\r\n var_6 = var_5\r\n if var_3 == (-1, -1, -1) :\r\n left_side_coordinates = []\r\n else:\r\n for i in range(len(vertices_nested_again_shapes)):\r\n for j in range(len(vertices_nested_again_shapes[i])):\r\n if var_3 == 
vertices_nested_again_shapes[i][j] or var_6 == vertices_nested_again_shapes[i][j]:\r\n left_side_coordinates.append(vertices_nested_again_shapes[i])\r\n break\r\n\r\n slope_points = []\r\n for j in range(len(left_side_coordinates)):\r\n for i in range(len(left_side_coordinates[j])):\r\n if i != len(left_side_coordinates[j]) - 1:\r\n ax1 = round(left_side_coordinates[j][i][0], 1)\r\n ay1 = round(left_side_coordinates[j][i][1], 1)\r\n ax2 = round(left_side_coordinates[j][i + 1][0], 1)\r\n ay2 = round(left_side_coordinates[j][i + 1][1], 1)\r\n else:\r\n ax1 = round(left_side_coordinates[j][i][0], 1)\r\n ay1 = round(left_side_coordinates[j][i][1], 1)\r\n ax2 = round(left_side_coordinates[j][0][0], 1)\r\n ay2 = round(left_side_coordinates[j][0][1], 1)\r\n if ax1 == ax2 and ay1 == ay2:\r\n continue\r\n elif ay1 == ay2:\r\n if ax1 < ax2:\r\n inc_value = ax1\r\n while inc_value != ax2:\r\n inc_value = round(inc_value + 0.1, 1)\r\n slope_points.append((inc_value, ay1, 0))\r\n else:\r\n inc_value = ax1\r\n while inc_value != ax2:\r\n inc_value = round(inc_value - 0.1, 1)\r\n slope_points.append((inc_value, ay1, 0))\r\n elif ax1 == ax2:\r\n if ay1 < ay2:\r\n inc_value = ay1\r\n while inc_value != ay2:\r\n inc_value = round(inc_value + 0.1, 1)\r\n slope_points.append((ax1, inc_value, 0))\r\n else:\r\n inc_value = ay1\r\n while inc_value != ay2:\r\n inc_value = round(inc_value - 0.1, 1)\r\n slope_points.append((ax1, inc_value, 0))\r\n elif ax1 != ax2 or ay1 != ay2:\r\n slope = (ay2 - ay1) / (ax2 - ax1)\r\n inc_value = ay1\r\n if ay2 > ay1:\r\n while (round(float(inc_value), 1) != ay2):\r\n inc_value = round(inc_value + 0.1, 1)\r\n obt_value = round(float((inc_value - ay1 + (slope * ax1)) / slope), 1)\r\n slope_points.append((obt_value, inc_value, 0))\r\n else:\r\n while (round(float(inc_value), 1) != ay2):\r\n inc_value = round(inc_value - 0.1, 1)\r\n obt_value = round(float((inc_value - ay1 + (slope * ax1)) / slope), 1)\r\n slope_points.append((obt_value, inc_value, 0))\r\n\r\n x_for_right_shape ,y_for_right_shape= vertical_checking(right_shape)\r\n min_points = area_at_the_left_of_given_piece(right_shape,x_for_right_shape,y_for_right_shape)\r\n\r\n left_side_coordinates = []\r\n for i in range(len(min_points) - 1):\r\n ax1 = round(min_points[i][0], 1)\r\n ay1 = round(min_points[i][1], 1)\r\n ax2 = round(min_points[i + 1][0], 1)\r\n ay2 = round(min_points[i + 1][1], 1)\r\n if ax1 == ax2 and ay1 == ay2:\r\n continue\r\n elif ay1 == ay2:\r\n if ax1 < ax2:\r\n inc_value = ax1\r\n while inc_value != ax2:\r\n left_side_coordinates.append((inc_value, ay1, 0))\r\n inc_value = round(inc_value + 0.1, 1)\r\n left_side_coordinates.append((ax2,ay1,0))\r\n else:\r\n inc_value = ax1\r\n while inc_value != ax2:\r\n left_side_coordinates.append((inc_value, ay1, 0))\r\n inc_value = round(inc_value - 0.1, 1)\r\n left_side_coordinates.append((ax2, ay1, 0))\r\n elif ax1 == ax2:\r\n if ay1 < ay2:\r\n inc_value = ay1\r\n left_side_coordinates.append((ax1,ay1,0))\r\n while inc_value != ay2:\r\n inc_value = round(inc_value + 0.1, 1)\r\n left_side_coordinates.append((ax1, inc_value, 0))\r\n else:\r\n inc_value = ay1\r\n left_side_coordinates.append((ax1,ay1,0))\r\n while inc_value != ay2:\r\n inc_value = round(inc_value - 0.1, 1)\r\n left_side_coordinates.append((ax1, inc_value, 0))\r\n elif ax1 != ax2 or ay1 != ay2:\r\n slope = (ay2 - ay1) / (ax2 - ax1)\r\n inc_value = ax1\r\n if ax2 > ax1:\r\n left_side_coordinates.append((ax1,ay1,0))\r\n while (round(float(inc_value), 1) != ax2):\r\n inc_value = round(inc_value + 0.1, 
1)\r\n obt_value = round(float((slope * (inc_value - ax1)) + ay1), 1)\r\n left_side_coordinates.append((inc_value, obt_value, 0))\r\n else:\r\n left_side_coordinates.append((ax1,ay1,0))\r\n while (round(float(inc_value), 1) != ax2):\r\n inc_value = round(inc_value - 0.1, 1)\r\n obt_value = round(float((slope * (inc_value - ax1)) + ay1), 1)\r\n left_side_coordinates.append((inc_value, obt_value, 0))\r\n\r\n\r\n short_distances = []\r\n for i in range(len(slope_points)):\r\n y_slope_point = round(slope_points[i][1], 1)\r\n x_slope_point = round(slope_points[i][0], 1)\r\n for j in range(len(left_side_coordinates)):\r\n y_min_point = round(left_side_coordinates[j][1], 1)\r\n x_min_point = round(left_side_coordinates[j][0], 1)\r\n if y_min_point == y_slope_point:\r\n short_distance = round(x_min_point - x_slope_point,1)\r\n short_distances.append(short_distance)\r\n\r\n shortest_distance_x = min_x\r\n for i in range(len(short_distances)):\r\n if i == 0:\r\n shortest_distance_x = short_distances[i]\r\n if short_distances[i] < shortest_distance_x:\r\n shortest_distance_x = short_distances[i]\r\n right_shape_nested = []\r\n for i in range(len(right_shape)):\r\n if shortest_distance_x == 0:\r\n a = right_shape[i][0] + 1\r\n else:\r\n a = right_shape[i][0] - (shortest_distance_x - 1)\r\n right_shape_nested.append((a, right_shape[i][1], 0))\r\n vertices_nested_again_shapes.append(right_shape_nested)\r\n return vertices_nested_again_shapes\r\n\r\n\r\ndef circle(radius, length_sheet, width_sheet): # Developing a polygon which contains 360 sides and also resembles circle completely\r\n Circle_shape = []\r\n angle = 1\r\n times = 360 / angle\r\n x = radius\r\n y = radius\r\n theta = 0\r\n for i in range(int(times)):\r\n point_circle = (round(int(length_sheet) + (x + ((radius) * sin(theta * (pi / 180)))) - (2 * radius), 6),\r\n round(int(width_sheet) + (y + ((radius) * cos(theta * (pi / 180)))), 6), 0)\r\n Circle_shape.append(point_circle)\r\n theta = theta + angle\r\n return Circle_shape\r\n\r\n\r\ndef triangle(temp1_triangle, temp2_triangle, temp3_triangle, length_sheet, width_sheet): # Developing all types of Triangle\r\n # Equilateral Triangle\r\n if temp1_triangle == temp2_triangle == temp3_triangle:\r\n Triangle_shape = [(int(length_sheet) - temp2_triangle, int(width_sheet), 0),\r\n (int(length_sheet), int(width_sheet), 0), (\r\n (int(length_sheet) + round((temp1_triangle / 2), 2)) - temp2_triangle,\r\n round(sqrt((temp1_triangle * 2) - ((temp1_triangle / 2) * 2)), 2) + int(width_sheet), 0)]\r\n # Isosceles Triangle\r\n elif temp1_triangle == temp2_triangle or temp2_triangle == temp3_triangle or temp3_triangle == temp1_triangle:\r\n if temp1_triangle == temp3_triangle:\r\n var_t2 = temp2_triangle\r\n var_t1 = temp1_triangle\r\n elif temp1_triangle == temp2_triangle:\r\n var_t2 = temp3_triangle\r\n var_t1 = temp2_triangle\r\n else:\r\n var_t2 = temp1_triangle\r\n var_t1 = temp3_triangle\r\n # Isosceles Triangle's calculations\r\n Triangle_shape = [(int(length_sheet) - var_t2, int(width_sheet), 0),\r\n (int(length_sheet), int(width_sheet), 0), ((int(length_sheet) + (var_t2 / 2)) - var_t2, round(\r\n sqrt((var_t1 * var_t1) - ((var_t2 / 2) * (var_t2 / 2))), 2) + int(width_sheet), 0)]\r\n # Scalene Triangle\r\n else:\r\n if temp2_triangle > temp1_triangle and temp2_triangle > temp3_triangle:\r\n var_t1 = temp1_triangle\r\n var_t2 = temp2_triangle\r\n var_t3 = temp3_triangle\r\n elif temp1_triangle > temp3_triangle and temp1_triangle > temp2_triangle:\r\n var_t2 = temp1_triangle\r\n var_t1 = 
temp3_triangle\r\n var_t3 = temp2_triangle\r\n else:\r\n var_t2 = temp3_triangle\r\n var_t1 = temp1_triangle\r\n var_t3 = temp2_triangle\r\n # Scalene Triangle's calculations\r\n S = (var_t1 + var_t2 + var_t3) / 2\r\n A = sqrt(S * (S - var_t1) * (S - var_t2) * (S - var_t3))\r\n H = round((2 * A) / var_t2, 2)\r\n G = asin(H / var_t1)\r\n F = round(var_t1 * cos(G), 2)\r\n E = asin(H / var_t3)\r\n I = var_t3 * cos(E)\r\n W = round(((F + I) - var_t2) / 2, 2)\r\n R = F - W\r\n X = sqrt((var_t1 * var_t1) - (R * R))\r\n T = round(X - H, 2)\r\n Triangle_shape = [(int(length_sheet) - var_t2, int(width_sheet), 0), (int(length_sheet), int(width_sheet), 0),\r\n ((int(length_sheet) + (F - W)) - var_t2, int(width_sheet) + (H + T), 0)]\r\n return Triangle_shape\r\n\r\n\r\n\r\ndef square(Square, length_sheet, width_sheet): # Developing square\r\n Square_shape = [(int(length_sheet) - Square, int(width_sheet), 0),\r\n (int(length_sheet) - Square, Square + int(width_sheet), 0),\r\n (int(length_sheet), Square + int(width_sheet), 0), (int(length_sheet), int(width_sheet), 0)]\r\n return Square_shape\r\n\r\n\r\ndef rectangle(Rect_length, Rect_width, length_sheet, width_sheet): # Developing rectangle\r\n Rectangle_shape = [(int(length_sheet) - Rect_length, int(width_sheet), 0),\r\n (int(length_sheet) - Rect_length, Rect_width + int(width_sheet), 0),\r\n (int(length_sheet), Rect_width + int(width_sheet), 0), (int(length_sheet), int(width_sheet), 0)]\r\n return Rectangle_shape\r\n\r\n\r\ndef pentagon(length_Pentagon, length_sheet, width_sheet): # Developing pentagon\r\n # Calculation for pentagon\r\n Pentagon_shape = [(int(length_sheet) + (round(length_Pentagon * cos((2 * pi) / 5), 2)) - (\r\n round((2 * length_Pentagon * cos((2 * pi) / 5)) + length_Pentagon, 2)), int(width_sheet), 0),\r\n ((int(length_sheet) + (\r\n round((length_Pentagon * cos((2 * pi) / 5)), 2) + length_Pentagon)) - (\r\n round((2 * length_Pentagon * cos((2 * pi) / 5)) + length_Pentagon, 2)),\r\n int(width_sheet),\r\n 0),\r\n (int(length_sheet), round(length_Pentagon * sin((2 * pi) / 5), 2) + int(width_sheet), 0),\r\n ((int(length_sheet) + round((length_Pentagon * cos((2 * pi) / 5)) + (length_Pentagon / 2),\r\n 2)) - (\r\n round((2 * length_Pentagon * cos((2 * pi) / 5)) + length_Pentagon, 2)),\r\n round((length_Pentagon * sin((2 * pi) / 5)) + (length_Pentagon * cos(pi / 3.33)),\r\n 2) + int(\r\n width_sheet), 0),\r\n (int(length_sheet) - (\r\n round((2 * length_Pentagon * cos((2 * pi) / 5)) + length_Pentagon, 2)),\r\n round(length_Pentagon * sin((2 * pi) / 5), 2) + int(width_sheet), 0)]\r\n return Pentagon_shape\r\n\r\n\r\ndef hexagon(length_Hexagon, length_sheet, width_sheet): # Developing hexagon\r\n Hexagon_shape = [(int(length_sheet) + (round(length_Hexagon * cos(pi / 6), 2)) - (\r\n round(length_Hexagon * 2 * cos(pi / 6), 2)), int(width_sheet), 0),\r\n (int(length_sheet), round(length_Hexagon * sin(pi / 6), 2) + int(width_sheet), 0),\r\n (int(length_sheet),\r\n round((length_Hexagon * sin(pi / 6)) + length_Hexagon, 2) + int(width_sheet), 0),\r\n ((int(length_sheet) + round(length_Hexagon * cos(pi / 6), 2)) - (\r\n round(length_Hexagon * 2 * cos(pi / 6), 2)),\r\n round(((length_Hexagon * sin(pi / 6)) + length_Hexagon) + (length_Hexagon * sin(pi / 6)),\r\n 2) + int(width_sheet), 0),\r\n (int(length_sheet) - (round(length_Hexagon * 2 * cos(pi / 6), 2)),\r\n round(length_Hexagon * sin(pi / 6) + length_Hexagon, 2) + int(width_sheet), 0),\r\n (int(length_sheet) - (round(length_Hexagon * 2 * cos(pi / 6), 2)),\r\n round(length_Hexagon * sin(pi / 
6), 2) + int(width_sheet), 0)]\r\n return Hexagon_shape\r\n\r\n\r\ndef polygon(Polygon_shape, length_sheet, width_sheet): # Developing polygon\r\n Polygon_shape_final = []\r\n minimum_x = minimum_x_coordinate(Polygon_shape)\r\n maximum_x = maximum_x_coordinate(Polygon_shape)\r\n minimum_y = minimum_y_coordinate(Polygon_shape)\r\n # Putting a polygon to the upper-right corner of the sheet\r\n for i in range(len(Polygon_shape)):\r\n origin = (int(length_sheet) + (Polygon_shape[i][0] - minimum_x)) - (maximum_x - minimum_x), \\\r\n Polygon_shape[i][1] - minimum_y + int(width_sheet), 0\r\n Polygon_shape_final.append(origin)\r\n return Polygon_shape_final\r\n\r\n\r\n\r\n\r\ndef freecad_nesting(new_vertices_shapes,length_sheet,width_sheet, freecad_file_address) : # creating Macro file in freecad\r\n # Adding the requisite data to the macro file of FreeCAD\r\n file_object = open(freecad_file_address, \"w+\")\r\n file_object.write(\"#Import the library files \\n\"\r\n \"import FreeCAD,Draft \\n\"\r\n \"import PartDesign \\n\"\r\n \"import PartDesignGui \\n\"\r\n \"import Spreadsheet \\n\"\r\n \"from math import sin,cos,degrees,radians,pi,sqrt,asin \\n \\n\"\r\n \"#Create a new document and activate PartDesign Workbench \\n\"\r\n \"App.newDocument(\\\"Shape\\\") \\n\"\r\n \"Gui.activateWorkbench(\\\"PartDesignWorkbench\\\") \\n\"\r\n \"App.activeDocument().addObject('PartDesign::Body','Body') \\n\"\r\n \"Gui.activeView().setActiveObject('pdbody', App.activeDocument().Body) \\n\"\r\n \"Gui.Selection.clearSelection() \\n\"\r\n \"Gui.Selection.addSelection(App.ActiveDocument.Body) \\n\"\r\n \"App.ActiveDocument.recompute() \\n\")\r\n\r\n file_object.write(\"sheet = [(0.0, 0.0, 0), (\" + str(length_sheet) + \", 0.0, 0), (\" + str(length_sheet) + \", \" + str(width_sheet) + \", 0), (0.0, \" + str(width_sheet) + \", 0)] \\n\")\r\n file_object.write(\"wire = Draft.makeWire(sheet, closed=True) \\n\")\r\n\r\n c = [\"a%d\" % x for x in range(1, len(new_vertices_shapes) + 1)]\r\n for x in range(len(c)): # you can loop over them\r\n file_object.write(str(c[x]) + \"=\" + str(new_vertices_shapes[x]) + \"\\n\")\r\n file_object.write(\"wire = Draft.makeWire(\" + str(c[x]) + \", closed=True) \\n\")\r\n file_object.close()\r\n return print(\"Macro file has been updated/created\")\r\n\r\ndef nested_shapes_coordinates_eff(new_vertices_other_shapes): # calculation for nesting algorithm\r\n left_shape = []\r\n right_shape = []\r\n bottom_shape = []\r\n top_shape = []\r\n new_vertices_other_shapes_clockwise = []\r\n\r\n for i in range(len(new_vertices_other_shapes)):\r\n new_vertices_other_shapes_clockwise.append(clockwise_list(new_vertices_other_shapes[i]))\r\n new_vertices_other_shapes = new_vertices_other_shapes_clockwise\r\n\r\n vertices_nested_again_shapes = []\r\n for i in range(len(new_vertices_other_shapes)):\r\n right_shape = new_vertices_other_shapes[i]\r\n max_y = maximum_y_coordinate(right_shape)\r\n min_y = minimum_y_coordinate(right_shape)\r\n min_x = minimum_x_coordinate(right_shape)\r\n max_x = maximum_x_coordinate(right_shape)\r\n left_side_coordinates = []\r\n for i in range(len(vertices_nested_again_shapes)):\r\n points_of_left_shape = []\r\n if right_shape == vertices_nested_again_shapes[i]:\r\n continue\r\n for j in range(len(vertices_nested_again_shapes[i])):\r\n if vertices_nested_again_shapes[i][j][0] <= min_x and vertices_nested_again_shapes[i][j][1] <= max_y and \\\r\n vertices_nested_again_shapes[i][j][1] >= min_y:\r\n points_of_left_shape.append(vertices_nested_again_shapes[i][j])\r\n if 
len(points_of_left_shape) != 0:\r\n left_side_coordinates.append(vertices_nested_again_shapes[i])\r\n\r\n if len(left_side_coordinates) == 0 or len(left_side_coordinates) != 0:\r\n var_1 = 0\r\n var_3 = (-1, -1, -1)\r\n var_6 = (-1, -1, -1)\r\n var_4 = 10000\r\n for i in range(len(vertices_nested_again_shapes)):\r\n for j in range(len(vertices_nested_again_shapes[i])):\r\n if vertices_nested_again_shapes[i][j][1] <= min_y and vertices_nested_again_shapes[i][j][0] <= min_x:\r\n var_2 = vertices_nested_again_shapes[i][j]\r\n if var_2[1] > var_1:\r\n var_1 = var_2[1]\r\n var_3 = var_2\r\n if vertices_nested_again_shapes[i][j][1] >= max_y and vertices_nested_again_shapes[i][j][0] <= min_x:\r\n var_5 = vertices_nested_again_shapes[i][j]\r\n if var_5[1] < var_4:\r\n var_4 = var_5[1]\r\n var_6 = var_5\r\n if var_3 == (-1, -1, -1):\r\n left_side_coordinates = []\r\n else:\r\n for i in range(len(vertices_nested_again_shapes)):\r\n for j in range(len(vertices_nested_again_shapes[i])):\r\n if var_3 == vertices_nested_again_shapes[i][j] or var_6 == vertices_nested_again_shapes[i][j]:\r\n left_side_coordinates.append(vertices_nested_again_shapes[i])\r\n break\r\n\r\n # print(\"left_side_coordinates = \", left_side_coordinates)\r\n shortest_distance_x = 0\r\n for i in range(len(left_side_coordinates)):\r\n if maximum_y_coordinate(left_side_coordinates[i]) < min_y:\r\n continue\r\n if maximum_x_coordinate(left_side_coordinates[i]) > shortest_distance_x :\r\n shortest_distance_x = maximum_x_coordinate(left_side_coordinates[i])\r\n # print(\"shortest_distance_x = \",shortest_distance_x)\r\n\r\n right_shape_nested = []\r\n for i in range(len(right_shape)):\r\n if shortest_distance_x == 0:\r\n a = right_shape[i][0] + 1\r\n else:\r\n a = right_shape[i][0] - ((min_x - shortest_distance_x) - 1)\r\n right_shape_nested.append((a, right_shape[i][1], 0))\r\n vertices_nested_again_shapes.append(right_shape_nested)\r\n return vertices_nested_again_shapes\r\n\r\n\r\ndef gravity_approach(length_sheet, width_sheet, new_vertices_shapes, arranged_groups, invalid_shapes): # main nesting calculation function\r\n new_vertices_other_shapes = []\r\n maximum_y_for_other_shape_for_new_column = 0\r\n maximum_y_for_other_shape_for_current_column = 0\r\n maximum_x_of_previous_column = 0\r\n first_shape_placed = 0\r\n for j in range(len(new_vertices_shapes)):\r\n maximum_y_compare = maximum_y_coordinate(new_vertices_shapes[j])\r\n if maximum_y_compare > maximum_y_for_other_shape_for_new_column:\r\n maximum_y_for_other_shape_for_new_column = maximum_y_compare\r\n for p in range(len(arranged_groups)):\r\n moved_other_shape = []\r\n intersection_with_previous_shapes = 0\r\n intersection_with_previous_other_shapes = 0\r\n if p == 0:\r\n # Use maximum y to place the circles and other shapes and check for intersection and sheet value\r\n other_shape_to_move = arranged_groups[p]\r\n vertical_movement_other_shape = width_sheet - (maximum_y_for_other_shape_for_new_column + 1)\r\n minimum_x_of_other_shape = minimum_x_coordinate(other_shape_to_move)\r\n horizontal_movement_other_shape = minimum_x_of_other_shape - 1\r\n for j in range(len(other_shape_to_move)):\r\n k = other_shape_to_move[j][0] - horizontal_movement_other_shape\r\n l = other_shape_to_move[j][1] - vertical_movement_other_shape\r\n moved_other_shape.append((round(k, 2), round(l, 2), 0))\r\n\r\n # Checking the intersection of current shape with previously placed shapes\r\n for j in range(len(new_vertices_shapes)):\r\n if 
intersection_of_shapes(new_vertices_shapes[j],moved_other_shape) == True:\r\n intersection_with_previous_shapes = 1\r\n\r\n # Placing the shapes if no intersections exist\r\n if intersection_with_previous_shapes == 0:\r\n for j in range(len(moved_other_shape)):\r\n invalid = 0\r\n if (((moved_other_shape[j][0] > length_sheet) or (\r\n moved_other_shape[j][1] > width_sheet)) and invalid == 0):\r\n invalid = 1\r\n break\r\n if invalid == 0:\r\n new_vertices_other_shapes.append(moved_other_shape)\r\n first_shape_placed = 1\r\n\r\n if invalid == 1:\r\n invalid_shapes.append(arranged_groups[p])\r\n\r\n if p != 0:\r\n # Use maximum y to place the circles and other shapes and check for intersection and sheet value\r\n other_shape_to_move = arranged_groups[p]\r\n if first_shape_placed == 1:\r\n maximum_y_for_other_shape_for_current_column = maximum_y_coordinate(new_vertices_other_shapes[len(new_vertices_other_shapes)-1])\r\n else:\r\n maximum_y_for_other_shape_for_current_column = maximum_y_for_other_shape_for_new_column\r\n minimum_x_of_other_shape = minimum_x_coordinate(other_shape_to_move)\r\n horizontal_movement_other_shape = minimum_x_of_other_shape - (maximum_x_of_previous_column + 1)\r\n vertical_movement_other_shape = width_sheet - (maximum_y_for_other_shape_for_current_column + 1)\r\n for j in range(len(other_shape_to_move)):\r\n k = other_shape_to_move[j][0] - horizontal_movement_other_shape\r\n l = other_shape_to_move[j][1] - vertical_movement_other_shape\r\n moved_other_shape.append((round(k, 2), round(l, 2), 0))\r\n\r\n # Checking the intersection of current shape with previously placed shapes\r\n for j in range(len(new_vertices_shapes)):\r\n if intersection_of_shapes(new_vertices_shapes[j], moved_other_shape) == True:\r\n intersection_with_previous_shapes = 1\r\n\r\n for j in range(len(new_vertices_other_shapes)):\r\n if intersection_of_shapes(new_vertices_other_shapes[j], moved_other_shape) == True:\r\n intersection_with_previous_other_shapes = 1\r\n\r\n # Placing the shapes if no intersections exist\r\n if intersection_with_previous_shapes == 0 and intersection_with_previous_other_shapes == 0:\r\n for j in range(len(moved_other_shape)):\r\n invalid = 0\r\n if (((moved_other_shape[j][0] > length_sheet) or (\r\n moved_other_shape[j][1] > width_sheet)) and invalid == 0):\r\n invalid = 1\r\n break\r\n if invalid == 0:\r\n new_vertices_other_shapes.append(moved_other_shape)\r\n # Trying to nest the shape with maximum values of previous shapes as the last option\r\n if invalid == 1:\r\n moved_other_shape = []\r\n for j in range(len(new_vertices_other_shapes)):\r\n maximum_x_compare = maximum_x_coordinate(new_vertices_other_shapes[j])\r\n if maximum_x_compare > maximum_x_of_previous_column:\r\n maximum_x_of_previous_column = maximum_x_compare\r\n horizontal_movement_other_shape = minimum_x_of_other_shape - (maximum_x_of_previous_column + 1)\r\n vertical_movement_other_shape = width_sheet - (maximum_y_for_other_shape_for_new_column + 1)\r\n for j in range(len(other_shape_to_move)):\r\n k = other_shape_to_move[j][0] - horizontal_movement_other_shape\r\n l = other_shape_to_move[j][1] - vertical_movement_other_shape\r\n moved_other_shape.append((round(k, 2), round(l, 2), 0))\r\n\r\n # Checking for intersection again with other shapes\r\n for j in range(len(new_vertices_shapes)):\r\n if intersection_of_shapes(new_vertices_shapes[j], moved_other_shape) == True:\r\n intersection_with_previous_shapes = 1\r\n\r\n # Final placement of invalid shapes (those which were not placed previously)\r\n if 
intersection_with_previous_shapes == 0:\r\n for j in range(len(moved_other_shape)):\r\n invalid = 0\r\n if (((moved_other_shape[j][0] > length_sheet) or (\r\n moved_other_shape[j][1] > width_sheet)) and invalid == 0):\r\n invalid = 1\r\n break\r\n if invalid == 0:\r\n new_vertices_other_shapes.append(moved_other_shape)\r\n if invalid == 1:\r\n invalid_shapes.append(moved_other_shape)\r\n return new_vertices_other_shapes\r\n\r\n\r\ndef print_func_1(new_vertices_shapes, invalid_shapes, grouped_nested_shapes): # function to print necessary statements for grouped shapes\r\n if len(invalid_shapes) == 1 and len(new_vertices_shapes) != 1:\r\n print(\"There is an unplaced shape and\", len(new_vertices_shapes),\r\n \"shapes have been placed in the sheet successfully.\")\r\n if len(new_vertices_shapes) == 1 and len(invalid_shapes) != 1:\r\n print(\"There are\", len(invalid_shapes),\r\n \"unplaced shapes and one shape has been placed in the sheet successfully.\")\r\n if len(invalid_shapes) == 1 and len(new_vertices_shapes) == 1:\r\n print(\"There is an unplaced shape and one shape has been placed in the sheet successfully.\")\r\n if len(new_vertices_shapes) != 1 and len(invalid_shapes) != 1:\r\n print(\"There are\", len(invalid_shapes), \"unplaced shapes and\", len(new_vertices_shapes),\r\n \"shapes have been placed in the sheet successfully.\")\r\n print(\"-----------------------------------------------\")\r\n print(\"Vertices of invalid shapes:\", invalid_shapes)\r\n print(\"-----------------------------------------------\")\r\n print(\"Final vertices for shapes: \", grouped_nested_shapes)\r\n print(\"-----------------------------------------------\")\r\n\r\n\r\ndef print_func_2(new_vertices_shapes, invalid_shapes): # function to print necessary statements for ungrouped shapes\r\n if len(invalid_shapes) == 1 and len(new_vertices_shapes) != 1:\r\n print(\"There is an unplaced shape and\", len(new_vertices_shapes),\r\n \"shapes have been placed in the sheet successfully.\")\r\n if len(new_vertices_shapes) == 1 and len(invalid_shapes) != 1:\r\n print(\"There are\", len(invalid_shapes),\r\n \"unplaced shapes and one shape has been placed in the sheet successfully.\")\r\n if len(invalid_shapes) == 1 and len(new_vertices_shapes) == 1:\r\n print(\"There is an unplaced shape and one shape has been placed in the sheet successfully.\")\r\n if len(new_vertices_shapes) != 1 and len(invalid_shapes) != 1:\r\n print(\"There are\", len(invalid_shapes), \"unplaced shapes and\", len(new_vertices_shapes),\r\n \"shapes have been placed in the sheet successfully.\")\r\n print(\"-----------------------------------------------\")\r\n print(\"Vertices of invalid shapes:\", invalid_shapes)\r\n print(\"-----------------------------------------------\")\r\n print(\"Final vertices for shapes: \", new_vertices_shapes)\r\n print(\"-----------------------------------------------\")\r\n\r\n\r\ndef dxf_calculations(msp, length_sheet, width_sheet): # function to extract coordinates from dxf drawings and their calculations\r\n list_for_start_end_points = []\r\n vertices_shapes_circle = []\r\n polyline = 0\r\n vertices_shapes_1 = []\r\n for e in msp:\r\n if e.dxftype() == \"LWPOLYLINE\":\r\n lines = msp.query('LWPOLYLINE')\r\n points = polyline\r\n first_point = lines[points]\r\n list_polyline = []\r\n for i in range(len(first_point)):\r\n x = round(first_point[i][0], 2)\r\n y = round(first_point[i][1], 2)\r\n list_polyline.append((x, y, 0))\r\n polyline = polyline + 1\r\n list_for_start_end_points.append([list_polyline[0], 
list_polyline[len(list_polyline) - 1]])\r\n vertices_shapes_1.append(clockwise_list(list_polyline))\r\n\r\n elif (e.dxftype() == \"LINE\"):\r\n list_line = []\r\n list_line.append((round(e.dxf.start[0], 2), round(e.dxf.start[1], 2), round(e.dxf.start[2], 2)))\r\n list_line.append((round(e.dxf.end[0], 2), round(e.dxf.end[1], 2), round(e.dxf.end[2], 2)))\r\n list_for_start_end_points.append([list_line[0], list_line[1]])\r\n vertices_shapes_1.append(clockwise_list(list_line))\r\n\r\n elif (e.dxftype() == \"CIRCLE\"):\r\n radius = e.dxf.radius\r\n area_through_radius = (pi * radius * radius)\r\n angle = 1\r\n times = 360 / angle\r\n x = e.dxf.center[0]\r\n y = e.dxf.center[1]\r\n theta = 0\r\n circle_area = 0\r\n # Developing a polygon which contains 360 sides and also resembles circle completely\r\n Circle_shape = []\r\n for i in range(int(times)):\r\n point_circle = (round(x + ((radius) * sin(theta * (pi / 180))), 2),\r\n round(y + ((radius) * cos(theta * (pi / 180))), 2), 0)\r\n Circle_shape.append(point_circle)\r\n theta = theta + angle\r\n vertices_shapes_circle.append(clockwise_list(Circle_shape))\r\n\r\n elif (e.dxftype() == \"ARC\"):\r\n x = e.dxf.center[0]\r\n y = e.dxf.center[1]\r\n radius_arc = e.dxf.radius\r\n start_angle = e.dxf.start_angle\r\n end_angle = e.dxf.end_angle\r\n if end_angle > start_angle:\r\n if round(end_angle - start_angle, 5) <= 90:\r\n times = 5\r\n theta_arc = start_angle\r\n angle_arc = (end_angle - start_angle) / 5\r\n if end_angle - start_angle >= 90 or end_angle - start_angle <= 180:\r\n times = 20\r\n theta_arc = start_angle\r\n angle_arc = (end_angle - start_angle) / 20\r\n if end_angle - start_angle >= 180 or end_angle - start_angle <= 270:\r\n times = 30\r\n theta_arc = start_angle\r\n angle_arc = (end_angle - start_angle) / 30\r\n if end_angle - start_angle > 270:\r\n times = 40\r\n theta_arc = start_angle\r\n angle_arc = (end_angle - start_angle) / 40\r\n\r\n else:\r\n times = 25\r\n theta_arc = end_angle\r\n angle_arc = -((360 - start_angle) + end_angle) / 25\r\n arc_shape = []\r\n\r\n for i in range(int(times + 1)):\r\n F = round(x + (radius_arc * cos(theta_arc * (pi / 180))), 2)\r\n E = round(y + (radius_arc * sin(theta_arc * (pi / 180))), 2)\r\n theta_arc = theta_arc + angle_arc\r\n arc_shape.append((F, E, 0))\r\n arc_shape = clockwise_list(arc_shape)\r\n list_for_start_end_points.append([arc_shape[0], arc_shape[len(arc_shape) - 1]])\r\n vertices_shapes_1.append(clockwise_list(arc_shape))\r\n\r\n shape_1 = []\r\n pre_grp_indices = []\r\n main_shp_indices = []\r\n for i in range(len(list_for_start_end_points)):\r\n sp_1_x = truncate(list_for_start_end_points[i][0][0], 1)\r\n sp_1_y = truncate(list_for_start_end_points[i][0][1], 1)\r\n ep_1_x = truncate(list_for_start_end_points[i][1][0], 1)\r\n ep_1_y = truncate(list_for_start_end_points[i][1][1], 1)\r\n sp_1 = (sp_1_x, sp_1_y)\r\n ep_1 = (ep_1_x, ep_1_y)\r\n if i not in main_shp_indices:\r\n shape_1.append(i)\r\n main_shp_indices.append(i)\r\n for j in range(len(list_for_start_end_points)):\r\n sp_2_x = truncate(list_for_start_end_points[j][0][0], 1)\r\n sp_2_y = truncate(list_for_start_end_points[j][0][1], 1)\r\n ep_2_x = truncate(list_for_start_end_points[j][1][0], 1)\r\n ep_2_y = truncate(list_for_start_end_points[j][1][1], 1)\r\n sp_2 = (sp_2_x, sp_2_y)\r\n ep_2 = (ep_2_x, ep_2_y)\r\n if i != j:\r\n if sp_1 == sp_2 or sp_1 == ep_2 or ep_1 == sp_2 or ep_1 == ep_2: # or (ep_1_x- ep_2_x <= 0.1) or (ep_1_x- ep_2_x <= -0.1) or (ep_1_y- ep_2_y <= 0.1) or (ep_1_y- ep_2_y <= -0.1) or (sp_1_x - sp_2_x 
<= 0.1) or (sp_1_x - sp_2_x <= -0.1) or (sp_1_y - sp_2_y <= 0.1) or (sp_1_y - sp_2_y <= -0.1) or (ep_1_x - sp_2_x <= 0.1) or (ep_1_x - sp_2_x <= -0.1) or (ep_1_y - sp_2_y <= 0.1) or (ep_1_y - sp_2_y <= -0.1):\r\n shape_1.append(j)\r\n pre_grp_indices.append(sorted(shape_1))\r\n shape_1 = []\r\n\r\n temp_union_ind_list = []\r\n temp_union_ind_list_again = []\r\n for i in range(len(pre_grp_indices)):\r\n temp_shape1 = set(pre_grp_indices[i])\r\n temp_shape3 = pre_grp_indices[i]\r\n for j in range(len(pre_grp_indices)):\r\n temp_shape2 = set(pre_grp_indices[j])\r\n temp_shape4 = pre_grp_indices[j]\r\n if len(temp_shape1.intersection(temp_shape2)) > 0:\r\n temp_shape3 = list(set().union(temp_shape3, temp_shape4))\r\n temp_shape1 = set(temp_shape3)\r\n for k in range(len(pre_grp_indices)):\r\n temp_shape2 = set(pre_grp_indices[k])\r\n temp_shape4 = pre_grp_indices[k]\r\n if len(temp_shape1.intersection(temp_shape2)) > 0:\r\n temp_shape3 = list(set().union(temp_shape3, temp_shape4))\r\n temp_shape1 = set(temp_shape3)\r\n if temp_shape3 not in temp_union_ind_list:\r\n temp_union_ind_list.append(temp_shape3)\r\n\r\n for i in range(len(temp_union_ind_list)):\r\n temp_shape1 = set(temp_union_ind_list[i])\r\n temp_shape3 = temp_union_ind_list[i]\r\n for j in range(len(temp_union_ind_list)):\r\n temp_shape2 = set(temp_union_ind_list[j])\r\n temp_shape4 = temp_union_ind_list[j]\r\n if len(temp_shape1.intersection(temp_shape2)) > 0:\r\n temp_shape3 = list(set().union(temp_shape3, temp_shape4))\r\n temp_shape1 = set(temp_shape3)\r\n for k in range(len(temp_union_ind_list)):\r\n temp_shape2 = set(temp_union_ind_list[k])\r\n temp_shape4 = temp_union_ind_list[k]\r\n if len(temp_shape1.intersection(temp_shape2)) > 0:\r\n temp_shape3 = list(set().union(temp_shape3, temp_shape4))\r\n temp_shape1 = set(temp_shape3)\r\n if temp_shape3 not in temp_union_ind_list_again:\r\n temp_union_ind_list_again.append(temp_shape3)\r\n\r\n union_ind_list = []\r\n for i in range(len(temp_union_ind_list_again)):\r\n if i == 0:\r\n union_ind_list.append(temp_union_ind_list_again[i])\r\n continue\r\n temp_var = 0\r\n for k in range(len(union_ind_list)):\r\n if temp_union_ind_list_again[i][0] in union_ind_list[k]:\r\n temp_var = 1\r\n if temp_var == 0 and (temp_union_ind_list_again[i] not in union_ind_list):\r\n union_ind_list.append(temp_union_ind_list_again[i])\r\n\r\n start_end_points = []\r\n ind_ver_list = []\r\n union_ver_list = []\r\n for i in range(len(union_ind_list)):\r\n temp_start_end_points = []\r\n for j in range(len(union_ind_list[i])):\r\n index_value = union_ind_list[i][j]\r\n random_list = vertices_shapes_1[int(index_value)]\r\n temp_start_end_points.append([random_list[0], random_list[len(random_list) - 1]])\r\n for k in range(len(random_list)):\r\n ind_ver_list.append(random_list[k])\r\n union_ver_list.append(clockwise_list(ind_ver_list))\r\n start_end_points.append(temp_start_end_points)\r\n ind_ver_list = []\r\n\r\n appending_indices = []\r\n seq_ind = []\r\n for i in range(len(start_end_points)):\r\n appending_indices = []\r\n for j in range(len(start_end_points[i])):\r\n if j == 0:\r\n appending_indices.append(union_ind_list[i][0])\r\n appending_st_en_pts = start_end_points[i][0]\r\n st_pt_1 = (truncate(appending_st_en_pts[0][0], 1), truncate(appending_st_en_pts[0][1], 1))\r\n en_pt_1 = (truncate(appending_st_en_pts[1][0], 1), truncate(appending_st_en_pts[1][1], 1))\r\n if j != 0:\r\n last_index = appending_indices[len(appending_indices) - 1]\r\n real_index_value = union_ind_list[i].index(last_index)\r\n 
appending_st_en_pts = start_end_points[i][real_index_value]\r\n st_pt_1 = (truncate(appending_st_en_pts[0][0], 1), truncate(appending_st_en_pts[0][1], 1))\r\n en_pt_1 = (truncate(appending_st_en_pts[1][0], 1), truncate(appending_st_en_pts[1][1], 1))\r\n for k in range(len(start_end_points[i])):\r\n if (union_ind_list[i][k] in appending_indices) == True:\r\n continue\r\n st_pt_2 = (truncate(start_end_points[i][k][0][0], 1), truncate(start_end_points[i][k][0][1], 1))\r\n en_pt_2 = (truncate(start_end_points[i][k][1][0], 1), truncate(start_end_points[i][k][1][1], 1))\r\n if en_pt_1 == st_pt_2: # or (en_pt_1[0] - st_pt_2[0] <= 0.1) or (en_pt_1[0] - st_pt_2[0] <= -0.1) or (en_pt_1[1] - st_pt_2[1] <= 0.1) or (en_pt_1[1] - st_pt_2[1] <= -0.1):\r\n appending_indices.append(union_ind_list[i][k])\r\n break\r\n if en_pt_1 == en_pt_2 or st_pt_1 == st_pt_2: # or (en_pt_1[0]- en_pt_2[0] <= 0.1) or (en_pt_1[0]- en_pt_2[0] <= -0.1) or (en_pt_1[1]- en_pt_2[1] <= 0.1) or (en_pt_1[1]- en_pt_2[1] <= -0.1) or st_pt_1 == st_pt_2 or (st_pt_1[0] - st_pt_2[0] <= 0.1) or (st_pt_1[0] - st_pt_2[0] <= -0.1) or (st_pt_1[1] - st_pt_2[1] <= 0.1) or (st_pt_1[1] - st_pt_2[1] <= -0.1):\r\n start_end_points[i][k] = anticlockwise_list(start_end_points[i][k])\r\n vertices_shapes_1[union_ind_list[i][k]] = anticlockwise_list(\r\n vertices_shapes_1[union_ind_list[i][k]])\r\n st_pt_2 = (truncate(start_end_points[i][k][0][0], 1), truncate(start_end_points[i][k][0][1], 1))\r\n en_pt_2 = (truncate(start_end_points[i][k][1][0], 1), truncate(start_end_points[i][k][1][1], 1))\r\n if en_pt_1 == st_pt_2: # or (en_pt_1[0] - st_pt_2[0] <= 0.1) or (en_pt_1[0] - st_pt_2[0] <= -0.1) or (en_pt_1[1] - st_pt_2[1] <= 0.1) or (en_pt_1[1] - st_pt_2[1] <= -0.1):\r\n appending_indices.append(union_ind_list[i][k])\r\n break\r\n else:\r\n start_end_points[i][k] = anticlockwise_list(start_end_points[i][k])\r\n vertices_shapes_1[union_ind_list[i][k]] = anticlockwise_list(\r\n vertices_shapes_1[union_ind_list[i][k]])\r\n st_pt_2 = (truncate(start_end_points[i][k][0][0], 1), truncate(start_end_points[i][k][0][1], 1))\r\n en_pt_2 = (truncate(start_end_points[i][k][1][0], 1), truncate(start_end_points[i][k][1][1], 1))\r\n\r\n seq_ind.append(appending_indices)\r\n appending_indices = []\r\n vertices_shapes = []\r\n for k in range(len(seq_ind)):\r\n temp_list_shape = []\r\n for m in range(len(seq_ind[k])):\r\n list_shape = vertices_shapes_1[seq_ind[k][m]]\r\n for n in range(len(list_shape)):\r\n if list_shape[n] not in temp_list_shape:\r\n temp_list_shape.append(list_shape[n])\r\n list_shape = []\r\n vertices_shapes.append(temp_list_shape)\r\n\r\n vertices_shapes_1 = vertices_shapes\r\n for j in range(len(vertices_shapes_circle)):\r\n vertices_shapes_1.append(vertices_shapes_circle[j])\r\n vertices_shapes_2 = []\r\n for i in range(len(vertices_shapes_1)):\r\n temp_shape = []\r\n for j in range(len(vertices_shapes_1[i])):\r\n temp_shape.append([vertices_shapes_1[i][j][0], vertices_shapes_1[i][j][1]])\r\n vertices_shapes_2.append(temp_shape)\r\n\r\n shape = []\r\n c = [\"a%d\" % x for x in range(1, len(vertices_shapes_2) + 1)]\r\n for x in range(len(c)): # you can loop over them\r\n c[x] = np.array(vertices_shapes_2[x])\r\n y = c[x]\r\n p = Polygon(y, facecolor='none', edgecolor='b')\r\n shape.append(p)\r\n fig, ax = plt.subplots()\r\n c = [\"%d\" % x for x in range(1, len(shape) + 1)]\r\n for x in range(len(c)):\r\n ax.text(vertices_shapes_2[x][0][0], vertices_shapes_2[x][0][1], str(c[x]), style='italic')\r\n ax.add_patch(shape[x])\r\n ax.set_xlim([0, 
width_sheet])\r\n ax.set_ylim([0, length_sheet])\r\n plt.show()\r\n return vertices_shapes\r\n\r\ndef anticlockwise_list(list_shape2): # it converts clockwise list to anti-clockwise list\r\n list_shape1 = []\r\n for i in range(len(list_shape2)):\r\n list_shape1.append(list_shape2[len(list_shape2) - 1 - i])\r\n return list_shape1\r\n\r\n\r\ndef truncate(number, decimals=0): # Returns a value truncated to a specific number of decimal places.\r\n if not isinstance(decimals, int):\r\n raise TypeError(\"decimal places must be an integer.\")\r\n elif decimals < 0:\r\n raise ValueError(\"decimal places has to be 0 or more.\")\r\n elif decimals == 0:\r\n return math.trunc(number)\r\n\r\n factor = 10.0 ** decimals\r\n return math.trunc(number * factor) / factor\r\n","sub_path":"nestle-mynest/nestle/mynest/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":62849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
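A quick standalone sketch of how the truncate() helper in the record above behaves, contrasted with round(); the function body mirrors the helper and the sample values are arbitrary:

import math

def truncate(number, decimals=0):  # mirrors the helper defined in the record above
    factor = 10.0 ** decimals
    return math.trunc(number * factor) / factor

print(truncate(3.789, 1))   # 3.7  -- digits are dropped, never rounded up
print(round(3.789, 1))      # 3.8
print(truncate(-3.789, 1))  # -3.7 -- trunc moves toward zero, floor would give -3.8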
+{"seq_id":"169184543","text":"from __future__ import division\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom collections import defaultdict\nfrom RSNA.model.focalloss import FocalLoss\nimport torch.nn.functional as F\n\n\ndef parse_cfg(cfgfile):\n \"\"\"\n Takes a configuration file\n\n Returns a list of blocks. Each blocks describes a block in the neural\n network to be built. Block is represented as a dictionary in the list\n\n \"\"\"\n\n file = open(cfgfile, 'r')\n lines = file.read().split('\\n') # store the lines in a list\n lines = [x for x in lines if len(x) > 0] # get read of the empty lines\n lines = [x for x in lines if x[0] != '#'] # get rid of comments\n lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces\n\n block = {}\n blocks = []\n\n for line in lines:\n if line[0] == \"[\": # This marks the start of a new block\n if len(block) != 0: # If block is not empty, implies it is storing values of previous block.\n blocks.append(block) # add it the blocks list\n block = {} # re-init the block\n block[\"type\"] = line[1:-1].rstrip()\n else:\n key, value = line.split(\"=\")\n block[key.rstrip()] = value.lstrip()\n blocks.append(block)\n\n return blocks\n\n\nclass EmptyLayer(nn.Module):\n def __init__(self):\n super(EmptyLayer, self).__init__()\n\n\nclass DetectionLayer(nn.Module):\n def __init__(self, inp_dim, anchors):\n super(DetectionLayer, self).__init__()\n self.anchors = anchors\n self.inp_dim = inp_dim\n self.mse_loss = nn.MSELoss()\n self.bce_loss = nn.BCELoss()\n self.sml_loss = nn.SmoothL1Loss()\n self.focalLoss = FocalLoss(gamma=2)\n\n def forward(self, x, device, anchors_index, target=None, iou_thre=0.4):\n is_training = target is not None\n if is_training:\n if anchors_index == 0:\n target = target['low_reso']\n elif anchors_index == 1:\n target = target['mid_reso']\n elif anchors_index == 2:\n target = target['high_reso']\n\n batch_size = x.size(0)\n stride = self.inp_dim // x.size(2)\n grid_size = self.inp_dim // stride\n bbox_attrs = 5\n num_anchors = len(self.anchors)\n # print(stride, grid_size, x.size(2), self.inp_dim)\n\n x = x.view(batch_size, bbox_attrs * num_anchors, grid_size * grid_size)\n x = x.transpose(1, 2).contiguous()\n x = x.view(batch_size, grid_size * grid_size * num_anchors, bbox_attrs)\n anchors = [(a[0] / stride, a[1] / stride) for a in self.anchors]\n # print(anchors)\n # Sigmoid the centre_X, centre_Y. 
and object confidencce\n x[:, :, 0] = torch.sigmoid(x[:, :, 0])\n x[:, :, 1] = torch.sigmoid(x[:, :, 1])\n x[:, :, 4] = torch.sigmoid(x[:, :, 4])\n if is_training:\n # calc loss\n # (batchsize, anchor_num, feature_size, feature_size, bbox_attrs)\n target = target.permute(0, 2, 3, 1, 4).contiguous()\n target = target.view(-1, grid_size * grid_size * num_anchors, 6)\n self.mse_loss = self.mse_loss.to(device)\n self.bce_loss = self.bce_loss.to(device)\n self.sml_loss = self.sml_loss.to(device)\n self.focalLoss = self.focalLoss.to(device)\n target = target.to(device)\n\n # select anchor center index\n target_squeeze = target.view(-1, 6)\n target_index = torch.nonzero(target_squeeze[..., 5]).squeeze()\n x_squeeze = x.view(-1, bbox_attrs)\n x_squeeze = x_squeeze.index_select(0, target_index)\n target_squeeze = target_squeeze.index_select(0, target_index)\n\n # calc\n if target_squeeze.size(0):\n n_correct = (x_squeeze[..., 4] > iou_thre).sum().item()\n n_gt = target_squeeze[..., 5].sum().item()\n else:\n n_gt = 0\n n_correct = 0\n # print(\"n_correct:\", n_correct, \"n_gt\", n_gt)\n\n # if target_squeeze.size(0):\n # loss_x = self.sml_loss(x_squeeze[..., 0], target_squeeze[..., 0])\n # loss_y = self.sml_loss(x_squeeze[..., 1], target_squeeze[..., 1])\n # loss_w = self.sml_loss(x_squeeze[..., 2], target_squeeze[..., 2]) / 2\n # loss_h = self.sml_loss(x_squeeze[..., 3], target_squeeze[..., 3]) / 2\n # else:\n # loss_x = torch.from_numpy(np.array(0)).type(torch.FloatTensor).to(device)\n # loss_y = torch.from_numpy(np.array(0)).type(torch.FloatTensor).to(device)\n # loss_w = torch.from_numpy(np.array(0)).type(torch.FloatTensor).to(device)\n # loss_h = torch.from_numpy(np.array(0)).type(torch.FloatTensor).to(device)\n\n loss_x = self.sml_loss(x[..., 0] * target[..., 5], target[..., 0] * target[..., 5])\n loss_y = self.sml_loss(x[..., 1] * target[..., 5], target[..., 1] * target[..., 5])\n loss_w = self.sml_loss(x[..., 2] * target[..., 5], target[..., 2] * target[..., 5]) / 2\n loss_h = self.sml_loss(x[..., 3] * target[..., 5], target[..., 3] * target[..., 5]) / 2\n\n # loss_conf = self.focalLoss(x[..., 4], (target[..., 4] > iou_thre).type(torch.FloatTensor).to(device))\n loss_conf = self.bce_loss(x[..., 4], target[..., 4]).type(torch.FloatTensor).to(device)\n loss = 1.0 * (loss_x + loss_y + loss_w + loss_h) + loss_conf\n return loss, loss_x.item(), loss_y.item(), loss_w.item(), loss_h.item(), loss_conf.item(), n_gt, n_correct\n else:\n heatmap = x.view(batch_size, grid_size, grid_size, num_anchors, bbox_attrs)\n # Add the center offsets\n grid = np.arange(grid_size)\n a, b = np.meshgrid(grid, grid)\n\n x_offset = torch.FloatTensor(a).view(-1, 1)\n y_offset = torch.FloatTensor(b).view(-1, 1)\n\n x_offset = x_offset.to(device)\n y_offset = y_offset.to(device)\n\n x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1, num_anchors).view(-1, 2).unsqueeze(0)\n x[:, :, :2] += x_y_offset\n\n # log space transform height and the width\n anchors = torch.FloatTensor(anchors).to(device)\n anchors = anchors.repeat(grid_size * grid_size, 1).unsqueeze(0)\n x[:, :, 2:4] = torch.exp(x[:, :, 2:4]) * anchors\n x[:, :, :4] *= stride\n\n return heatmap, x\n\n\ndef create_modules(blocks, channel=1, droprate=0.2):\n net_info = blocks[0] # Captures the information about the input and pre-processing\n input_dim = net_info['height']\n module_list = nn.ModuleList()\n prev_filters = channel\n output_filters = []\n\n for index, x in enumerate(blocks[1:]):\n module = nn.Sequential()\n\n # check the type of block\n # create a 
new module for the block\n # append to module_list\n\n # If it's a convolutional layer\n if (x[\"type\"] == \"convolutional\"):\n # Get the info about the layer\n activation = x[\"activation\"]\n try:\n batch_normalize = int(x[\"batch_normalize\"])\n bias = False\n except:\n batch_normalize = 0\n bias = True\n\n filters = int(x[\"filters\"])\n padding = int(x[\"pad\"])\n kernel_size = int(x[\"size\"])\n stride = int(x[\"stride\"])\n\n if padding:\n pad = (kernel_size - 1) // 2\n else:\n pad = 0\n\n # Add the convolutional layer\n conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=bias)\n module.add_module(\"conv_{0}\".format(index), conv)\n if droprate is not None:\n module.add_module(\"dp_{0}\".format(index), nn.Dropout2d(p=droprate))\n # Add the Batch Norm Layer\n if batch_normalize:\n bn = nn.BatchNorm2d(filters)\n module.add_module(\"batch_norm_{0}\".format(index), bn)\n\n # Check the activation.\n # It is either Linear or a Leaky ReLU for YOLO\n if activation == \"leaky\":\n activn = nn.LeakyReLU(0.1, inplace=True)\n module.add_module(\"leaky_{0}\".format(index), activn)\n\n # If it's an upsampling layer\n # We use Bilinear2dUpsampling\n elif (x[\"type\"] == \"upsample\"):\n stride = int(x[\"stride\"])\n upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n module.add_module(\"upsample_{}\".format(index), upsample)\n\n # If it is a route layer\n elif (x[\"type\"] == \"route\"):\n x[\"layers\"] = x[\"layers\"].split(',')\n # Start of a route\n start = int(x[\"layers\"][0])\n # end, if there exists one.\n try:\n end = int(x[\"layers\"][1])\n except:\n end = 0\n # Positive anotation\n if start > 0:\n start = start - index\n if end > 0:\n end = end - index\n route = EmptyLayer()\n module.add_module(\"route_{0}\".format(index), route)\n if end < 0:\n filters = output_filters[index + start] + output_filters[index + end]\n else:\n filters = output_filters[index + start]\n\n # shortcut corresponds to skip connection\n elif x[\"type\"] == \"shortcut\":\n shortcut = EmptyLayer()\n module.add_module(\"shortcut_{}\".format(index), shortcut)\n\n # Yolo is the detection layer\n elif x[\"type\"] == \"yolo\":\n mask = x[\"mask\"].split(\",\")\n mask = [int(x) for x in mask]\n\n anchors = x[\"anchors\"].split(\",\")\n anchors = [int(a) for a in anchors]\n anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]\n anchors = [anchors[i] for i in mask]\n\n detection = DetectionLayer(int(input_dim), anchors)\n module.add_module(\"Detection_{}\".format(index), detection)\n\n module_list.append(module)\n prev_filters = filters\n output_filters.append(filters)\n\n return (net_info, module_list)\n\n\nclass Darknet(nn.Module):\n def __init__(self, cfgfile, channel=1, clf=False, droprate=0.2):\n super(Darknet, self).__init__()\n self.blocks = parse_cfg(cfgfile)\n self.net_info, self.module_list = create_modules(self.blocks, channel=channel, droprate=droprate)\n self.loss_name = ['x', 'y', 'w', 'h', 'conf', 'gt_num', 'pred_num']\n self.yolo_detect_num = 0\n self.clf_support = clf\n self.droprate = droprate\n\n def forward(self, x, device, target=None):\n modules = self.blocks[1:]\n outputs = {} # We cache the outputs for the route layer\n loss_sum_list = []\n is_training = target is not None\n self.losses = defaultdict(float)\n write = 0\n heatmap_list = []\n for i, module in enumerate(modules):\n # get clf_feature\n if (self.yolo_detect_num == 0) and self.clf_support and (x.size(1) == 1024):\n clf_feature = x\n\n module_type = (module[\"type\"])\n\n if module_type == 
\"convolutional\" or module_type == \"upsample\":\n x = self.module_list[i](x)\n\n elif module_type == \"route\":\n layers = module[\"layers\"]\n layers = [int(a) for a in layers]\n\n if (layers[0]) > 0:\n layers[0] = layers[0] - i\n\n if len(layers) == 1:\n x = outputs[i + (layers[0])]\n\n else:\n if (layers[1]) > 0:\n layers[1] = layers[1] - i\n\n map1 = outputs[i + layers[0]]\n map2 = outputs[i + layers[1]]\n x = torch.cat((map1, map2), 1)\n\n elif module_type == \"shortcut\":\n from_ = int(module[\"from\"])\n x = outputs[i - 1] + outputs[i + from_]\n\n elif module_type == 'yolo':\n if target is None:\n # inference\n # Transform\n heatmap, x = self.module_list[i][0](x, device, self.yolo_detect_num, target=None)\n if not write: # if no collector has been intialised.\n detections = x\n write = 1\n else:\n detections = torch.cat((detections, x), 1)\n heatmap_list.append(heatmap)\n\n else:\n # train\n x, *losses = self.module_list[i][0](x, device, self.yolo_detect_num, target=target)\n loss_sum_list.append(x)\n\n for name, loss in zip(self.loss_name, losses):\n self.losses[name] += loss\n self.yolo_detect_num += 1\n if self.yolo_detect_num == 3:\n self.yolo_detect_num = 0\n outputs[i] = x\n\n\n if self.clf_support:\n return clf_feature, (sum(loss_sum_list) if is_training else (heatmap_list, detections))\n else:\n return sum(loss_sum_list) if is_training else (heatmap_list, detections)\n\n def load_weights(self, weightfile):\n # Open the weights file\n fp = open(weightfile, \"rb\")\n\n # The first 5 values are header information\n # 1. Major version number\n # 2. Minor Version Number\n # 3. Subversion number\n # 4,5. Images seen by the network (during training)\n header = np.fromfile(fp, dtype=np.int32, count=5)\n self.header = torch.from_numpy(header)\n self.seen = self.header[3]\n\n weights = np.fromfile(fp, dtype=np.float32)\n\n ptr = 0\n for i in range(len(self.module_list)):\n module_type = self.blocks[i + 1][\"type\"]\n\n # If module_type is convolutional load weights\n # Otherwise ignore.\n\n if module_type == \"convolutional\":\n model = self.module_list[i]\n try:\n batch_normalize = int(self.blocks[i + 1][\"batch_normalize\"])\n except:\n batch_normalize = 0\n\n conv = model[0]\n\n if (batch_normalize):\n if self.droprate is not None:\n bn = model[2]\n\n else:\n bn = model[1]\n\n # Get the number of weights of Batch Norm Layer\n num_bn_biases = bn.bias.numel()\n\n # Load the weights\n bn_biases = torch.from_numpy(weights[ptr:ptr + num_bn_biases])\n ptr += num_bn_biases\n\n bn_weights = torch.from_numpy(weights[ptr: ptr + num_bn_biases])\n ptr += num_bn_biases\n\n bn_running_mean = torch.from_numpy(weights[ptr: ptr + num_bn_biases])\n ptr += num_bn_biases\n\n bn_running_var = torch.from_numpy(weights[ptr: ptr + num_bn_biases])\n ptr += num_bn_biases\n\n # Cast the loaded weights into dims of model weights.\n bn_biases = bn_biases.view_as(bn.bias.data)\n bn_weights = bn_weights.view_as(bn.weight.data)\n bn_running_mean = bn_running_mean.view_as(bn.running_mean)\n bn_running_var = bn_running_var.view_as(bn.running_var)\n\n # Copy the data to model\n bn.bias.data.copy_(bn_biases)\n bn.weight.data.copy_(bn_weights)\n bn.running_mean.copy_(bn_running_mean)\n bn.running_var.copy_(bn_running_var)\n\n else:\n # Number of biases\n num_biases = conv.bias.numel()\n\n # Load the weights\n conv_biases = torch.from_numpy(weights[ptr: ptr + num_biases])\n ptr = ptr + num_biases\n\n # reshape the loaded weights according to the dims of the model weights\n conv_biases = 
conv_biases.view_as(conv.bias.data)\n\n # Finally copy the data\n conv.bias.data.copy_(conv_biases)\n\n # Let us load the weights for the Convolutional layers\n num_weights = conv.weight.numel()\n\n # Do the same as above for weights\n conv_weights = torch.from_numpy(weights[ptr:ptr + num_weights])\n ptr = ptr + num_weights\n\n conv_weights = conv_weights.view_as(conv.weight.data)\n conv.weight.data.copy_(conv_weights)\n\n\nif __name__ == \"__main__\":\n # Smoke test. The original referenced an undefined `img` and passed a bool as\n # `device`; a dummy input sized from the cfg (shape is an assumption) makes it runnable.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n darknet = Darknet(\"/home/mengdi/yuxiang.ye/YOLO_v3_tutorial_from_scratch/cfg/yolov3.cfg\").to(device)\n size = int(darknet.net_info[\"height\"])\n img = torch.randn(1, 1, size, size, device=device)\n pred = darknet(img, device)\n print(pred)","sub_path":"model/darknet.py","file_name":"darknet.py","file_ext":"py","file_size_in_byte":17047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
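For context, parse_cfg() in the darknet.py record above consumes Darknet-style cfg blocks read from a file. A minimal, invented illustration of the format it expects and the block list it would produce (this is not the real yolov3.cfg):

cfg_text = """
[net]
height=416

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
"""
# Written to a file and passed to parse_cfg(), this yields dicts whose
# values all stay strings, roughly:
# [{'type': 'net', 'height': '416'},
#  {'type': 'convolutional', 'batch_normalize': '1', 'filters': '32',
#   'size': '3', 'stride': '1', 'pad': '1', 'activation': 'leaky'}]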
+{"seq_id":"467349022","text":"#motion_detector will be executed before importing df from plotting\nfrom motion_detector import df\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.models import HoverTool, ColumnDataSource\n\n\"\"\"\nCreate bokeh graph based off of start and stop times of figures\nentering into frame. Importing hover tool to show start and end times\nwhen hovering over plot.\n\"\"\"\n\ndf[\"Start_string\"]=df[\"Start\"].dt.strftime(\"%Y - %m - %d %H: %M: %S\")\ndf[\"End_string\"]=df[\"End\"].dt.strftime(\"%Y - %m - %d %H: %M: %S\")\n\ncds=ColumnDataSource(df)\n\np=figure(x_axis_type=\"datetime\", height=100, width=500, title=\"Motion Graph\")\np.yaxis.minor_tick_line_color=None\np.yaxis.ticker.desired_num_ticks=1\n\nhover=HoverTool(tooltips=[(\"Start\", \"@Start_string\"),(\"End\", \"@End_string\")])\np.add_tools(hover)\n\nq=p.quad(left=\"Start\",right=\"End\",bottom=0,top=1, color=\"green\",source=cds)\n\noutput_file(\"Graph.html\")\nshow(p)\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"109023716","text":"import time\nimport warnings\nfrom numpy import linalg as la\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import normalize\nimport args\nfrom algorithm.LFM import LFM\nfrom config import RepMethod\n# from lsi.LSI import LSI\nfrom detect.SCAN import SCAN\nfrom src.emd import getEMDCommunity, getEMDCommunitys\nfrom src.xtadw import XTADW\n# from src.emd import getEMDCommunity\nfrom src.graph import Graph\n\n\nclass RECM(object):\n def __init__(self, dim, lamb, graph1, graph2):\n self.lamb = lamb\n self.dim = dim\n self.node_size = graph1.N + graph2.N\n self.graph1 = graph1\n self.graph2 = graph2\n self.features = None\n\n def train(self, epochs, rep_method=None, combineFeature=None):\n \"\"\"\n\n :param epochs: train epochs\n :param rep_method: rep learn's var\n :param combineFeature: G1 and G2's combination fearture\n :return: xTAWD's result\n \"\"\"\n # self.adj = self.getAdj()\n # M=(A+A^2)/2 where A is the row-normalized adjacency matrix\n self.M = self.getSimilarityMatrix(rep_method, combineFeature)\n # T is feature_size*node_num, text features\n # get from lsi\n self.T = self.features.T\n self.feature_size = self.features.shape[1]\n self.W = np.random.randn(self.dim, self.node_size)\n self.H = np.random.randn(self.dim, self.feature_size)\n # Update\n for i in range(epochs):\n print('Iteration ', i)\n # Update W\n B = np.dot(self.H, self.T)\n drv = 2 * np.dot(np.dot(B, B.T), self.W) - \\\n 2 * np.dot(B, self.M.T) + self.lamb * self.W\n Hess = 2 * np.dot(B, B.T) + self.lamb * np.eye(self.dim)\n drv = np.reshape(drv, [self.dim * self.node_size, 1])\n rt = -drv\n dt = rt\n vecW = np.reshape(self.W, [self.dim * self.node_size, 1])\n while np.linalg.norm(rt, 2) > 1e-4:\n dtS = np.reshape(dt, (self.dim, self.node_size))\n Hdt = np.reshape(np.dot(Hess, dtS), [\n self.dim * self.node_size, 1])\n\n at = np.dot(rt.T, rt) / np.dot(dt.T, Hdt)\n vecW = vecW + at * dt\n rtmp = rt\n rt = rt - at * Hdt\n bt = np.dot(rt.T, rt) / np.dot(rtmp.T, rtmp)\n dt = rt + bt * dt\n self.W = np.reshape(vecW, (self.dim, self.node_size))\n\n # Update H\n drv = np.dot((np.dot(np.dot(np.dot(self.W, self.W.T), self.H), self.T)\n - np.dot(self.W, self.M.T)), self.T.T) + self.lamb * self.H\n drv = np.reshape(drv, (self.dim * self.feature_size, 1))\n rt = -drv\n dt = rt\n vecH = np.reshape(self.H, (self.dim * self.feature_size, 1))\n while np.linalg.norm(rt, 2) > 1e-4:\n dtS = np.reshape(dt, (self.dim, self.feature_size))\n Hdt = np.reshape(np.dot(np.dot(np.dot(self.W, self.W.T), dtS), np.dot(self.T, self.T.T))\n + self.lamb * dtS, (self.dim * self.feature_size, 1))\n at = np.dot(rt.T, rt) / np.dot(dt.T, Hdt)\n vecH = vecH + at * dt\n rtmp = rt\n rt = rt - at * Hdt\n bt = np.dot(rt.T, rt) / np.dot(rtmp.T, rtmp)\n dt = rt + bt * dt\n self.H = np.reshape(vecH, (self.dim, self.feature_size))\n self.Vecs = np.hstack(\n (normalize(self.W.T), normalize(np.dot(self.T.T, self.H.T))))\n\n # print(self.Vecs)\n # print(self.Vecs.shape)\n # get embeddings\n self.vectors_1 = {}\n self.vectors_2 = {}\n # todo possible has mistake\n look_back_1 = self.graph1.look_back_list\n node_size_1 = self.graph1.N\n # print(look_back_1)\n look_back_2 = self.graph2.look_back_list\n node_size_2 = self.graph2.N\n # print(look_back_2)\n # print(\"****************\")\n for i, embedding in enumerate(self.Vecs):\n # print(i)\n # print(\"---------------\")\n # print(look_back[i])\n if i <= node_size_1 - 1:\n self.vectors_1[look_back_1[i]] = embedding\n else:\n 
self.vectors_2[look_back_2[i - node_size_1]] = embedding\n return pd.DataFrame(self.vectors_1), pd.DataFrame(self.vectors_2)\n\n def getSimilarityMatrix(self, rep_method, combineFeature):\n \"\"\"\n\n :param rep_method:\n :return: simlar matrix\n \"\"\"\n C = np.zeros((self.node_size, self.node_size))\n for out_node_index in range(self.node_size): # for each of N nodes\n for inner_node_index in range(self.node_size): # for each of p landmarks\n # calcu similar matrix\n C[out_node_index, inner_node_index] = self.compute_similarity(\n rep_method,\n combineFeature[out_node_index],\n combineFeature[inner_node_index])\n return C\n\n def compute_similarity(self, rep_method, vec1, vec2):\n \"\"\"\n self.node_attributes: tuple of (same length) vectors of node attributes for corresponding nodes\n :param rep_method:\n :param vec1:\n :param vec2: two vectors of the same length\n # :param node_indices:\n :return: number between 0 and 1 representing their similarity\n \"\"\"\n dist = rep_method.gammastruc * np.linalg.norm(vec1 - vec2) # compare distances between structural identities\n return np.exp(-dist)\n\n def getT(self):\n g = self.graph1.G\n look_back = self.graph1.look_back_list\n features_1 = np.vstack([g.nodes[look_back[i]]['feature']\n for i in range(g.number_of_nodes())])\n g = self.graph2.G\n look_back = self.graph2.look_back_list\n features_2 = np.vstack([g.nodes[look_back[i]]['feature']\n for i in range(g.number_of_nodes())])\n self.features = np.vstack((features_1, features_2))\n print(self.features.shape)\n # self.preprocessFeature()\n # print(self.features.T)\n # print(self.features.T.shape)\n return self.features.T\n\n def preprocessFeature(self):\n if self.features.shape[1] > 200:\n U, S, VT = la.svd(self.features)\n Ud = U[:, 0:200]\n Sd = S[0:200]\n self.features = np.array(Ud) * Sd.reshape(200)\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n rep_method = RepMethod(max_layer=2)\n arg_1 = args.args()\n arg_1.input = \"data/test/karate.edgelist_1\"\n arg_1.feature_file = \"data/test/cora.features_1\"\n t1 = time.time()\n nx_graph_1 = nx.read_edgelist(arg_1.input, nodetype=int, comments=\"%\")\n adj_matrix_1 = nx.adjacency_matrix(nx_graph_1).todense()\n g_1 = Graph(adj_matrix_1)\n\n g_1.read_edgelist(filename=arg_1.input, weighted=arg_1.weighted,\n directed=arg_1.directed)\n g_1.read_node_features(arg_1.feature_file)\n xTawd_1 = XTADW(g_1, arg_1.representation_size)\n structure_feature_1 = xTawd_1.get_features(rep_method)\n # print(structure_feature_1)\n # print(structure_feature_1.shape)\n\n arg_2 = args.args()\n arg_2.input = \"data/test/karate.edgelist_3\"\n arg_2.feature_file = \"data/test/cora.features_3\"\n nx_graph_2 = nx.read_edgelist(arg_2.input, nodetype=int, comments=\"%\")\n adj_matrix_2 = nx.adjacency_matrix(nx_graph_2).todense()\n g_2 = Graph(adj_matrix_2)\n\n g_2.read_edgelist(filename=arg_2.input, weighted=arg_2.weighted,\n directed=arg_2.directed)\n g_2.read_node_features(arg_2.feature_file)\n xTawd_2 = XTADW(g_2, arg_2.representation_size)\n structure_feature_2 = xTawd_2.get_features(rep_method)\n # print(structure_feature_2)\n structure_feature_2 = np.pad(structure_feature_2, ((0, 0),\n (0,\n abs(structure_feature_1.shape[1]\n - structure_feature_2.shape[1]))),\n 'constant', constant_values=(0, 0))\n # print(structure_feature_2.shape)\n # print(structure_feature_2)\n combineFuture = np.vstack((structure_feature_1, structure_feature_2))\n # print(combineFuture.shape)\n recm = RECM(6, 0.2, g_1, g_2)\n recm.getT()\n 
df1, df2 = recm.train(1, rep_method, combineFuture)\n algorithm = SCAN(g_1.G, 0.7, 3)\n communities = algorithm.execute()\n for community in communities:\n print('community: ', sorted(community))\n hubs_outliers = algorithm.get_hubs_outliers(communities)\n print('hubs: ', hubs_outliers[0])\n print('outliers: ', hubs_outliers[1])\n\n algorithm = SCAN(g_2.G, 0.7, 3)\n communities = algorithm.execute()\n for community in communities:\n print('community: ', sorted(community))\n hubs_outliers = algorithm.get_hubs_outliers(communities)\n print('hubs: ', hubs_outliers[0])\n print('outliers: ', hubs_outliers[1])\n print(df1)\n res = getEMDCommunity(['1', '13', '3', '7'], ['1', '13', '3', '7'], df1,df2)\n print(res)\n print('Success')\n\n\n","sub_path":"FindSimilarityCommunity/src/recm.py","file_name":"recm.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"165819740","text":"\"\"\"\nwhereivebeen\n============\n\nDownload and aggregate your tagged locations from the APIs of\nvarious social networks.\n\n:copyright: (c) 2014 Jonathan Robson\n:license: MIT (see LICENSE for details)\n\"\"\"\n\nimport calendar\nimport json\nimport logging\nimport os\nimport time\nimport sqlite3\n\nimport click\nimport requests\n\n\npass_db = click.make_pass_decorator(sqlite3.Connection)\n\nlogger = logging.getLogger(__name__)\n\n\nclass LogFormatter(logging.Formatter):\n \"\"\"Custom logging formatter\"\"\"\n\n styles = {\n logging.DEBUG: {'fg': 'blue'},\n logging.INFO: {'bold': True},\n logging.WARNING: {'fg': 'yellow'},\n logging.ERROR: {'fg': 'red'},\n logging.CRITICAL: {'fg': 'white', 'bg': 'red'},\n }\n\n def format(self, record):\n \"\"\"Format the log record.\"\"\"\n record.message = record.getMessage()\n record.asctime = self.formatTime(record, self.datefmt)\n return click.style(self._fmt % record.__dict__, **self.styles[record.levelno])\n\n\n@click.group()\n@click.option('db_file', '--db', type=click.Path(),\n default=os.path.expanduser('~/.whereivebeen.db'),\n help='The path to the SQLite3 database (default: ~/.whereivebeen.db).')\n@click.pass_context\ndef cli(ctx, db_file):\n \"\"\"Download and aggregate your tagged locations from the APIs of\n various social networks.\n\n Currently supported source APIs: foursquare, instagram, facebook\n\n It works by downloading the data into a local SQLite3 database, and\n then you can dump the locations as JSON for easier use on the web.\n \"\"\"\n db = sqlite3.connect(db_file)\n cursor = db.cursor()\n cursor.execute('SELECT * FROM sqlite_master')\n if not cursor.fetchall():\n # Intialize the database if it's empty.\n cursor.executescript(\"\"\"\n CREATE TABLE locations (time INT, source TEXT, name TEXT, lat REAL, lng REAL);\n CREATE TABLE sources (name TEXT, token TEXT);\n INSERT INTO sources VALUES('foursquare', NULL);\n INSERT INTO sources VALUES('instagram', NULL);\n INSERT INTO sources VALUES('facebook', NULL);\n CREATE INDEX idx_locations_time on locations (time);\n CREATE INDEX idx_locations_source on locations (source);\n CREATE INDEX idx_locations_name on locations (name);\n CREATE INDEX idx_locations_lat on locations (lat);\n CREATE INDEX idx_locations_lng on locations (lng);\n \"\"\")\n db.commit()\n ctx.obj = db\n\n # Setup logger\n logger.setLevel(logging.DEBUG)\n log_handler = logging.StreamHandler()\n log_handler.setLevel(logging.DEBUG)\n log_handler.setFormatter(LogFormatter(\n '%(asctime)s %(levelname)-7s %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))\n logger.addHandler(log_handler)\n\n\n@cli.command('set-token')\n@click.argument('source', type=click.Choice(['foursquare', 'instagram', 'facebook']))\n@click.argument('token')\n@pass_db\ndef set_token(db, source, token):\n \"\"\"Set the access token for a source API.\"\"\"\n db.execute('UPDATE sources SET token = :token WHERE name = :name',\n {'token': token, 'name': source})\n db.commit()\n\n\ndef load_locations(db, source, locations):\n \"\"\"Load locations into the database.\"\"\"\n if not locations:\n return\n cursor = db.cursor()\n count = 0\n for location in locations:\n # If there's an existing check-in that's within 30 seconds of\n # this one and almost exactly the same location, it's probably\n # the same one that was just cross-posted on a different site.\n cursor.execute(\n \"\"\"\n SELECT count(1) FROM locations WHERE abs(time - :time) < 30\n AND abs(lat - :lat) <= 0.001 AND abs(lng - :lng) <= 0.001\n \"\"\",\n {'time': 
location['time'], 'lat': location['lat'], 'lng': location['lng']}\n )\n if cursor.fetchone()[0] > 0:\n continue\n\n cursor.execute(\n \"\"\"\n INSERT INTO locations (time, source, name, lat, lng)\n VALUES (:time, :source, :name, :lat, :lng)\n \"\"\",\n location\n )\n count += 1\n\n db.commit()\n logger.info('Saved {saved} locations from {source} (skipped {skipped})'.format(\n saved=count, source=source, skipped=len(locations)-count\n ))\n\n\n@cli.command()\n@pass_db\ndef etl(db):\n \"\"\"Download location data from source APIs.\"\"\"\n cursor = db.cursor()\n cursor.execute('SELECT name, token FROM sources')\n tokens = dict(cursor.fetchall())\n\n cursor.execute('SELECT source, max(time) FROM locations GROUP BY source')\n after_timestamps = dict(cursor.fetchall())\n\n # TODO: Make the code below more dynamic.\n\n if tokens['foursquare']:\n load_locations(db, 'foursquare', etl_foursquare(\n 'https://api.foursquare.com/v2/users/self/checkins',\n params={'v': '20140612', 'oauth_token': tokens['foursquare']},\n after_timestamp=after_timestamps.get('foursquare', 0),\n ))\n\n if tokens['instagram']:\n load_locations(db, 'instagram', etl_instagram(\n 'https://api.instagram.com/v1/users/self/media/recent',\n params={'access_token': tokens['instagram']},\n after_timestamp=after_timestamps.get('instagram', 0),\n ))\n\n if tokens['facebook']:\n load_locations(db, 'facebook', etl_facebook(\n 'https://graph.facebook.com/v2.0/me/tagged_places',\n params={'access_token': tokens['facebook']},\n after_timestamp=after_timestamps.get('facebook', 0),\n ))\n\n\ndef etl_foursquare(url, params, after_timestamp=0):\n \"\"\"Download locations from Foursquare.\"\"\"\n params.update({\n 'afterTimestamp': after_timestamp + 1,\n 'limit': 100,\n 'offset': 0,\n })\n locations = []\n while True:\n logger.debug('Making API call to ' + url)\n response = requests.get(url, params=params)\n data = response.json()\n if response.status_code != 200:\n logger.error('(Foursquare API) ' + json.dumps(data['meta']))\n return\n\n if not data['response']['checkins']['items']:\n break\n\n for item in data['response']['checkins']['items']:\n locations.append({\n 'time': item['createdAt'],\n 'source': 'foursquare',\n 'name': item['venue']['name'],\n 'lat': item['venue']['location']['lat'],\n 'lng': item['venue']['location']['lng'],\n })\n\n params['offset'] += 100\n time.sleep(1)\n\n logger.info('Downloaded {0} locations from foursquare'.format(len(locations)))\n\n return locations\n\n\ndef etl_instagram(url, params, after_timestamp=0):\n \"\"\"Download locations from Instagram.\"\"\"\n params.update({\n 'min_timestamp': after_timestamp + 1,\n })\n locations = []\n while True:\n logger.debug('Making API call to ' + url)\n response = requests.get(url, params=params)\n data = response.json()\n if response.status_code != 200:\n logger.error('(Instagram API) ' + json.dumps(data['meta']))\n return\n\n if not data['data']:\n break\n\n for item in data['data']:\n if not item['location'] or 'name' not in item['location']:\n continue\n locations.append({\n 'time': int(item['created_time']),\n 'source': 'instagram',\n 'name': item['location']['name'],\n 'lat': item['location']['latitude'],\n 'lng': item['location']['longitude'],\n })\n\n if 'next_max_id' not in data['pagination']:\n break\n\n params['max_id'] = data['pagination']['next_max_id']\n time.sleep(1)\n\n logger.info('Downloaded {0} locations from instagram'.format(len(locations)))\n\n return locations\n\n\ndef etl_facebook(url, params, after_timestamp=0):\n \"\"\"Download locations from 
Facebook.\"\"\"\n locations = []\n complete = False\n while True:\n logger.debug('Making API call to ' + url)\n response = requests.get(url, params=params)\n data = response.json()\n if response.status_code != 200:\n logger.error('(Facebook API) ' + json.dumps(data))\n return\n\n if not data['data']:\n break\n\n for item in data['data']:\n epoch = calendar.timegm(time.strptime(item['created_time'], '%Y-%m-%dT%H:%M:%S+0000'))\n if epoch < 0:\n continue\n if epoch <= after_timestamp:\n complete = True\n break\n try:\n locations.append({\n 'time': epoch,\n 'source': 'facebook',\n 'name': item['place']['name'],\n 'lat': item['place']['location']['latitude'],\n 'lng': item['place']['location']['longitude'],\n })\n except KeyError:\n continue\n\n if complete:\n break\n\n params['after'] = data['paging']['cursors']['after']\n time.sleep(1)\n\n logger.info('Downloaded {0} locations from facebook'.format(len(locations)))\n\n return locations\n\n\n@cli.command()\n@click.option('--callback', metavar='FUNCTION', help='Optional callback function for JSONP.')\n@pass_db\ndef dump(db, callback):\n \"\"\"Dump all locations as JSON.\"\"\"\n db.row_factory = sqlite3.Row\n cursor = db.cursor()\n cursor.execute(\"\"\"\n SELECT name, round(lat, 4) AS rnd_lat, round(lng, 4) AS rnd_lng FROM locations\n GROUP BY (rnd_lat || ',' || rnd_lng)\n \"\"\")\n data = {'type': 'FeatureCollection', 'features': []}\n for i, row in enumerate(cursor):\n data['features'].append({\n 'id': i,\n 'type': 'Feature',\n 'geometry': {\n 'type': 'Point',\n 'coordinates': [row['rnd_lng'], row['rnd_lat']]\n },\n 'properties': {\n 'name': row['name']\n }\n })\n output = json.dumps(data, separators=(',', ':'))\n if callback is not None:\n output = callback + '(' + output + ');'\n click.echo(output)\n","sub_path":"whereivebeen.py","file_name":"whereivebeen.py","file_ext":"py","file_size_in_byte":10198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"213361191","text":"import numpy as np\nimport tensorflow as tf\n\ndef model_fn(features, labels, mode):\n W = tf.get_variable(\"W\", [1], dtype=tf.float64)\n b = tf.get_variable(\"b\", [1], dtype=tf.float64)\n y = W*features['x'] + b\n \n loss = tf.reduce_sum(tf.square(y - labels))\n\n global_step = tf.train.get_global_step()\n optimizer = tf.train.GradientDescentOptimizer(0.01)\n train = tf.group(optimizer.minimize(loss),\n tf.assign_add(global_step, 1))\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=y,\n loss=loss,\n train_op=train)\n\nestimator = tf.estimator.Estimator(model_fn=model_fn)\n\nx_train = np.array([1., 2., 3., 4.])\ny_train = np.array([0., -1., -2., -3.])\nx_eval = np.array([2., 5., 8., 1.])\ny_eval = np.array([-1.01, -4.1, -7., 0.])\n\ninput_fn = tf.estimator.inputs.numpy_input_fn(\n {\"x\": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)\ntrain_input_fn = tf.estimator.inputs.numpy_input_fn(\n {\"x\": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)\neval_input_fn = tf.estimator.inputs.numpy_input_fn(\n {\"x\": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)\n\nestimator.train(input_fn=input_fn, steps=1000)\n\ntrain_metrics = estimator.evaluate(input_fn=train_input_fn)\neval_metrics = estimator.evaluate(input_fn=eval_input_fn)\nprint(\"train metrics: %r\"% train_metrics)\nprint(\"eval metrics: %r\"% eval_metrics)\n\nw = tf.Variable(1)\nmul = tf.multiply(w, 2)\nadd = tf.add(w, 2)\ngroup = tf.group(mul, add)\ntuples = tf.tuple([mul, add])\n#sess.run(group)和sess.run(tuple)都会求Tensor(add)\n#Tensor(mul)的值。区别是,tf.group()返回的是`op`\n#tf.tuple()返回的是list of tensor。\n#这样就会导致,sess.run(tuple)的时候,会返回 Tensor(mul),Tensor(add)的值.\n#而 sess.run(group)不会\nsess = tf.InteractiveSession()\nw.initializer.run()\ngroup.run()#没有输出\nsess.run(tuples)#有输出","sub_path":"python/spyder/ml/tensorflow/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"267866712","text":"from Monument import Monument\nimport importer_utils as utils\nimport requests\nfrom os import path\n\n\nMAPPING_DIR = \"mappings\"\n\n\nclass SeBbrSv(Monument):\n\n def update_labels(self):\n \"\"\"\n Original labels look like this:\n Wickmanska gården (Paradis 35)\n We don't need the latter part (fastighetsbeteckning) in the label.\n \"\"\"\n label = utils.get_rid_of_brackets(utils.remove_markup(self.namn))\n self.add_label(\"sv\", label)\n return\n\n def set_bbr(self):\n \"\"\"\n This will get a link that looks like\n raa/bbr/21300000002805\n Depending on whether the prefix is raa/bbr/ or raa/bbra/\n \"\"\"\n bbr_link = utils.get_bbr_link(self.bbr)\n self.add_statement(\"cultural_heritage_sweden\", bbr_link)\n\n def set_heritage_bbr(self):\n \"\"\"\n In Sweden there are three different types of legal protection\n for different types of cultural heritage,\n so we created three new items:\n\n governmental listed building complex (Q24284071)\n for buildings owned by the state,\n\n individual listed building complex (Q24284072)\n for privately owned buildings,\n\n ecclesiastical listed building complex (Q24284073)\n for older buildings owned by the Church of Sweden.\n\n Which legal protection each monument goes under\n is not stored in the WLM database.\n We therefore need to look that up by\n querying the source database via their API.\n \"\"\"\n protection_date = False\n url = \"http://kulturarvsdata.se/\" + \\\n self.wd_item[\"statements\"][self.props[\"cultural_heritage_sweden\"]][0][\"value\"]\n url_list = url.split(\"/\")\n url_list.insert(-1, \"jsonld\")\n url = \"/\".join(url_list)\n data = requests.get(url).json()\n for element in data[\"@graph\"]:\n if \"ns5:spec\" in element:\n bbr_type = element[\"ns5:spec\"]\n if bbr_type.startswith(\"Kyrkligt kulturminne\"):\n type_q = \"Q24284073\"\n elif bbr_type.startswith(\"Byggnadsminne\"):\n type_q = \"Q24284072\"\n protection_date = bbr_type.split(\"(\")[-1][:-1]\n elif bbr_type.startswith(\"Statligt byggnadsminne\"):\n type_q = \"Q24284071\"\n protection_date = bbr_type.split(\"(\")[-1][:-1]\n \"\"\"\n The original set_heritage() added an empty claim\n because there's no heritage status specified in mapping file,\n so we start by removing that empty claim.\n \"\"\"\n self.remove_statement(\"heritage_status\")\n if protection_date:\n # 1969-01-31\n date_dict = utils.date_to_dict(protection_date, \"%Y-%m-%d\")\n qualifier = {\"start_time\":\n {\"time_value\": date_dict}}\n else:\n qualifier = None\n self.add_statement(\"heritage_status\", type_q, qualifier)\n\n def set_function(self):\n \"\"\"\n TODO\n examples:\n https://gist.github.com/Vesihiisi/f637916ea1d80a4be5d71a3adf6e2dc2\n \"\"\"\n # functions = get_rid_of_brackets(self.funktion).lower().split(\",\")\n return\n\n def set_architect(self):\n \"\"\"\n Add architect claim if available.\n Only if wikilinked.\n Can be more than one.\n Check if it's a human.\n \"\"\"\n if self.has_non_empty_attribute(\"arkitekt\"):\n architects = utils.get_wikilinks(self.arkitekt)\n for name in architects:\n wp_page = name.title\n q_item = utils.q_from_wikipedia(\"sv\", wp_page)\n if q_item is not None:\n if utils.is_whitelisted_P31(q_item, [\"Q5\"]):\n self.add_statement(\"architect\", q_item)\n\n def set_location(self):\n \"\"\"\n TODO\n This is the same as 'address' in monuments_all.\n There are some street addresses. 
Some are simple:\n Norra Murgatan 3\n Some are complex:\n Skolgatan 5, Västra Kyrkogatan 3\n Norra Murgatan 27, Uddens gränd 14-16\n \"\"\"\n if self.has_non_empty_attribute(\"plats\"):\n if utils.count_wikilinks(self.plats) == 1:\n location = utils.q_from_first_wikilink(\"sv\", self.plats)\n self.add_statement(\"location\", location)\n\n def update_descriptions(self):\n \"\"\"\n Use fastighetsbeteckning as alias.\n For example:\n (Knutse 2:19)\n \"\"\"\n fastighetsbeteckning = utils.get_text_inside_brackets(self.namn)\n self.add_alias(\"sv\", fastighetsbeteckning)\n\n def set_no_of_buildings(self):\n \"\"\"\n The 'funktion' column looks like this:\n Kapell (3 byggnader)\n From this, we extract: has parts of class building,\n and how many as qualifier.\n Some items don't have any numbers, so we ignore those.\n \"\"\"\n extracted_no = utils.get_number_from_string(\n utils.get_text_inside_brackets(self.funktion))\n if extracted_no is not None:\n self.add_statement(\n \"has_parts_of_class\", \"Q41176\",\n {\"quantity\": {\"quantity_value\": extracted_no}})\n\n def set_adm_location(self):\n \"\"\"\n Use offline mapping file\n to map municipality to P131.\n The column is 'kommun'.\n It looks like this:\n Alingsås\n Just the name of the municipality\n without the word kommun or genitive.\n \"\"\"\n if self.kommun == \"Göteborg\":\n municip_name = \"Gothenburg\"\n else:\n municip_name = self.kommun\n municip_dict = utils.load_json(path.join(\n MAPPING_DIR, \"sweden_municipalities.json\"))\n pattern_en = municip_name.lower() + \" municipality\"\n try:\n municipality = [x[\"item\"] for x in municip_dict if x[\n \"en\"].lower() == pattern_en][0]\n self.add_statement(\"located_adm\", municipality)\n except IndexError:\n print(\"Could not parse municipality: {}.\".format(self.kommun))\n return\n\n def set_inception(self):\n \"\"\"\n The 'byggar' column can have many forms,\n but here we only process the obvious cases:\n 1865\n [[1865]]\n It can also look like:\n 1100- eller 1200-talet\n and many other variants, which are ignored.\n \"\"\"\n if self.has_non_empty_attribute(\"byggar\"):\n year_parsed = utils.parse_year(self.byggar)\n if year_parsed is not None:\n self.add_statement(\"inception\", {\"time_value\": year_parsed})\n\n def set_monuments_all_id(self):\n \"\"\"\n Map which column name in specific table\n is used as ID in monuments_all.\n \"\"\"\n self.monuments_all_id = self.bbr\n\n def __init__(self, db_row_dict, mapping, data_files, existing):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing)\n self.set_monuments_all_id()\n self.set_changed()\n self.wlm_source = self.create_wlm_source(self.monuments_all_id)\n self.set_country()\n self.set_is()\n self.set_heritage()\n self.set_source()\n self.set_registrant_url()\n self.update_labels()\n self.update_descriptions()\n self.set_image(\"bild\")\n self.exists(\"sv\")\n self.set_commonscat()\n self.set_coords((\"lat\", \"lon\"))\n self.set_inception()\n self.set_no_of_buildings()\n self.set_bbr()\n self.set_heritage_bbr()\n self.set_adm_location()\n self.set_architect()\n self.set_location()\n self.set_function()\n self.exists_with_prop(mapping)\n","sub_path":"importer/SeBbrSv.py","file_name":"SeBbrSv.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"580688922","text":"\"\"\"create_available_parking_space_table\n\nRevision ID: 5d99d954f4bd\nRevises: e6def7ff3050\nCreate Date: 2016-11-16 07:30:32.785982\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5d99d954f4bd'\ndown_revision = 'e6def7ff3050'\nbranch_labels = None\ndepends_on = None\n\ndef upgrade():\n op.create_table(\n 'available_parking_space_pool',\n sa.Column('plate', sa.String(7), sa.ForeignKey('vehicles.plate'), primary_key=True),\n sa.Column('latitude', sa.DECIMAL(precision=32, scale=6), nullable=False),\n sa.Column('longitude', sa.DECIMAL(precision=32, scale=6), nullable=False),\n sa.Column('location', sa.String(255), nullable=True),\n sa.Column('level', sa.Integer, nullable=True),\n sa.Column('is_active', sa.Boolean, nullable=False, default=False),\n sa.Column('created_at', sa.DateTime, nullable=False, default=sa.func),\n sa.Column('updated_at', sa.DateTime, nullable=False, default=sa.func),\n )\n\n\ndef downgrade():\n op.drop_table('available_parking_space_pool')\n","sub_path":"alembic/versions/5d99d954f4bd_create_available_parking_space_pool_table.py","file_name":"5d99d954f4bd_create_available_parking_space_pool_table.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"557579661","text":"from adafruit_ads1x15.analog_in import AnalogIn # from Adafruit_Python_ADS1x15\nimport time\nimport logging\nimport csv\nimport datetime as dt\n\nclass Pin:\n def __init__(self, id, adc, adc_pin, threshold = 32000, record_values = False):\n \"\"\"\n Args:\n id (int): Unique ID to every pin (should be unique across RPis too)\n adc (adafruit_ads1x15.ads1115.ADS.ADS1115): Represents one ADC module.\n adc_pin (adafruit_ads1x15.ads1115.ADS.P0/1/2/3): Represents a pin on the ADC module.\n threshold (int, optional): Light threshold. The pin is on if the light value is below\n this threshold.\n record_values (boolean): Whether to record light value readings. Readings\n are recorded in a csv file named pin-.csv.\n\n Returns:\n A new Pin.\n \"\"\"\n if type(id) != int:\n raise TypeError('id is {}, which is a {}. It should be an int.'.format(id,type(id)))\n if type(threshold) != int and type(threshold) != float:\n raise TypeError('threshold is {}, which is a {}. It should be a number.'.format(id,type(id)))\n if type(record_values) != bool:\n raise TypeError('record_values is {}, which is a {}. It should be a bool.'.format(id,type(id)))\n\n self.id = id\n self.adc = adc\n self.adc_pin = adc_pin\n self.threshold = threshold\n self.record_values = record_values\n\n def is_on_single(self):\n \"\"\"Checks if this pin is currently on, based on 1 reading. This may return off if the pin is blinking.\n\n Returns:\n A tuple of whether the pin is on (bool) and the light value reading (int).\n \"\"\"\n light_value = AnalogIn(self.adc, self.adc_pin).value\n# print(str(self), light_value)\n return light_value < self.threshold, light_value\n \n def is_on(self):\n \"\"\"Checks whether this pin is on, which includes blinking. Records light values to\n a csv file, if self.record_values is True.\n\n Returns:\n True if this pin is on or blinking, False otherwise.\n The pin will appear off if it is disconnected.\n \"\"\"\n log = logging.getLogger()\n values = []\n final_on = False\n for i in range(20):\n on, val = self.is_on_single()\n values.append(val)\n if not final_on and on:\n final_on = True\n time.sleep(0.1)\n\n log.debug(\"{} is off. Values:{}\".format(self, values))\n\n if self.record_values:\n with open('pin-' + str(self.id) + '.csv','a+') as f:\n writer = csv.writer(f,quoting=csv.QUOTE_NONNUMERIC)\n now = dt.datetime.now().isoformat()\n rows = map(lambda v: (now,v), values)\n writer.writerows(rows)\n\n return final_on\n\n def __str__(self):\n return \"Pin {} (threshold={})\".format(self.id, self.threshold)\n\n __repr__ = __str__\n","sub_path":"pin.py","file_name":"pin.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"88289115","text":"#!/usr/bin/env python\n\nfrom RestAPIMethod import RESTResource\nfrom tools.ssh_executor import ssh_executor\nfrom json import dumps\nimport os\nfrom couchdb_layer.mcm_database import database\nfrom collections import defaultdict\nfrom tools.user_management import access_rights\n\nclass GetBjobs(RESTResource):\n def __init__(self):\n self.access_limit = access_rights.user\n\n def GET(self, *args):\n \"\"\"\n Get bjobs information regarding the batch jobs\n \"\"\"\n ssh_exec = ssh_executor()\n try:\n stdin, stdout, stderr = ssh_exec.execute(self.create_command(args))\n out = stdout.read()\n err = stderr.read()\n if err:\n if \"No job found in job group\" in err: # so the shown string is consistent with production\n return dumps({\"results\": 'No unfinished job found'})\n return dumps({\"results\": err})\n return dumps({\"results\": out})\n finally:\n ssh_exec.close_executor()\n\n def create_command(self, options):\n bcmd = 'bjobs'\n for opt in options:\n if '-g' in opt:\n bcmd += ' -g ' + '/' + '/'.join(opt.split()[1:])\n else:\n bcmd += opt\n return bcmd\n\n\nclass GetLogFeed(RESTResource):\n def __init__(self):\n self.access_limit = access_rights.user\n\n def GET(self, *args):\n \"\"\"\n Gets a number of lines from given log.\n \"\"\"\n if not args:\n self.logger.error('No arguments were given')\n return dumps({\"results\": 'Error: No arguments were given'})\n name = os.path.join('logs', args[0])\n nlines = -1\n if len(args) > 1:\n nlines = int(args[1])\n return dumps(self.read_logs(name, nlines))\n\n def read_logs(self, name, nlines):\n\n with open(name) as log_file:\n try:\n data = log_file.readlines()\n except IOError as ex:\n self.logger.error('Could not access logs: \"{0}\". Reason: {1}'.format(name, ex))\n return {\"results\": \"Error: Could not access logs.\"}\n\n if nlines > 0:\n data = data[-nlines:]\n return {\"results\": ''.join(data)}\n\n\nclass GetRevision(RESTResource):\n def __init__(self):\n self.access_limit = access_rights.user\n\n def GET(self, *args):\n \"\"\" \n returns the current tag of the software running\n \"\"\"\n revision=os.getenv('MCM_REVISION')\n return revision\n\n\nclass GetLogs(RESTResource):\n def __init__(self):\n self.access_limit = access_rights.user\n self.path = \"logs\"\n\n def GET(self, *args):\n \"\"\"\n Gets a list of logs sorted by date.\n \"\"\"\n\n files_dates = sorted([{\"name\": filename, \"modified\": os.path.getmtime(os.path.join(self.path, filename))}\n for filename in os.listdir(self.path)\n if os.path.isfile(os.path.join(self.path, filename))], key=lambda x: x[\"modified\"],\n reverse=True)\n\n return dumps({\"results\": files_dates})\n\n\nclass GetStats(RESTResource):\n def __init__(self):\n self.access_limit = access_rights.administrator\n\n def GET(self, *args):\n \"\"\"\n Get a bunch of stat information, as a test\n \"\"\"\n\n def render( fcns, divs):\n display='''\\\n\n \n \n \n \n \n %s\n \n\n'''% ( fcns, divs )\n return display\n\n def oneChart( title, data, opt=''):\n opt_s=''\n if opt=='log':\n opt_s=',vAxis: {logScale:true}'\n\n fcn='''\\\n var %s = google.visualization.arrayToDataTable( %s );\n\n var options_chart_%s = {\n title: \"Status for %s\",\n hAxis: {title: \"Campaign\", titleTextStyle: {color: \"red\"}}%s\n };\n\n var chart_%s = new google.visualization.ColumnChart(document.getElementById(\"chart_div_%s\"));\n chart_%s.draw(%s, options_chart_%s);\n '''%( title, dumps(data), \n title,\n title,\n opt_s,\n title,title,\n title,title,title )\n div='\\n'%( title )\n return (fcn,div)\n\n def 
oneGauge( title, data):\n h = int(250 * (len(data)/5. +1))\n fcn='''\\\n var %s = google.visualization.arrayToDataTable( %s ); var options_gauge = {\n height: %d,\n redFrom: 90, redTo: 100,\n yellowFrom:75, yellowTo: 90,\n minorTicks: 5\n };\n var gauge_%s = new google.visualization.Gauge(document.getElementById('gauge_div_%s'));\n gauge_%s.draw(%s,options_gauge)\n '''%( title, dumps(data),\n h,\n title,title,\n title,title)\n\n div='\\n'%( title )\n return (fcn,div)\n\n\n\n rdb = database('requests')\n crdb =database('chained_requests')\n ccdb =database('chained_campaigns')\n cdb = database('campaigns')\n\n html=\"\\n\"\n html+=\"This is a stats page internal to McM\\n\"\n\n counts = defaultdict(lambda: defaultdict(int) )\n counts_e = defaultdict(lambda: defaultdict(int) )\n sums = defaultdict(int) \n\n statuses=['new', 'validation', 'approved' , 'submitted', 'done', 'upcoming']\n data = []\n data.append( ['Step'] + statuses )\n data_g=[['Label','Value']]\n\n a_cc = args[0]\n if a_cc == 'all':\n all_r = rdb.get_all()\n #all_r = rdb.queries(['member_of_campaign==Summer12'])\n for mcm_r in all_r:\n counts[str(mcm_r['member_of_campaign'])] [mcm_r['status']] +=1\n to_add=mcm_r['total_events']\n if mcm_r['status'] in ['submitted','done']:\n to_add=mcm_r['completed_events']\n try:\n counts_e[str(mcm_r['member_of_campaign'])] [mcm_r['status']] += int(to_add)\n except:\n self.logger.error('cannot seem to be able to digest \"%s\" for %s' % (to_add, mcm_r['prepid']))\n\n \n for c in sorted(counts.keys()):\n a=0\n entry=[]\n entry.append( c ) # step[1] is the flow name \n for s in statuses:\n entry.append( counts_e[c][s] )\n a+=counts_e[c][s]\n if not a:\n g=0.\n else:\n g = int(float(counts_e[c]['done']) / float(a) * 100.)\n data.append(entry)\n data_g.append([c,g])\n\n\n (f,d)=oneChart('all', data, opt='log')\n (f1,d1)=oneGauge( a_cc+'_g', data_g)\n f+=f1\n d+=d1\n return render( f,d)\n \n \n if not ccdb.document_exists( a_cc ):\n return \"%s does not exists\" %( a_cc )\n mcm_cc = ccdb.get( a_cc )\n steps = map(lambda s : s[0], mcm_cc['campaigns'])\n all_cr = crdb.queries(['member_of_campaign==%s'%a_cc])\n\n for cc in all_cr:\n upcoming=0\n for r in cc['chain']:\n mcm_r = rdb.get(r)\n counts[str(mcm_r['member_of_campaign'])] [mcm_r['status']] +=1\n upcoming=mcm_r['total_events']\n if mcm_r['status'] in ['done']:\n counts_e[str(mcm_r['member_of_campaign'])] [mcm_r['status']] += mcm_r['completed_events']\n elif mcm_r['status'] in ['submitted']:\n ##split the stat in done and submitted accordingly\n counts_e[str(mcm_r['member_of_campaign'])] ['done'] += mcm_r['completed_events']\n counts_e[str(mcm_r['member_of_campaign'])] ['submitted'] += max([0, mcm_r['total_events'] - mcm_r['completed_events']])\n else:\n counts_e[str(mcm_r['member_of_campaign'])] [mcm_r['status']] += mcm_r['total_events']\n #fill up the rest with upcoming\n for noyet in steps[ len(cc['chain']):]:\n counts_e[str(noyet)]['upcoming'] += upcoming\n\n for step in mcm_cc['campaigns']:\n entry=[]\n entry.append( step[0] ) # step[1] is the flow name\n for s in statuses:\n entry.append( counts_e[step[0]][s] )\n data.append(entry)\n\n\n (f,d)=oneChart( a_cc, data)\n data_g=[['Label','Value']]\n for step in mcm_cc['campaigns']:\n a=0\n for s in statuses:\n a+=counts_e[step[0]][s]\n if not a: \n g=0.\n else:\n g = int(float(counts_e[step[0]]['done']) / float(a) * 100.)\n data_g.append( [step[0],g ])\n (f1,d1)=oneGauge( a_cc+'_g', data_g)\n f+=f1\n d+=d1\n return render( f,d)\n\n \"\"\"\n all_r = rdb.get_all()\n #all_r = 
rdb.queries(['member_of_campaign==Summer12'])\n\n to_count=['type','member_of_campaign','status']\n to_sum= ['total_events','completed_events']\n for r in all_r:\n for c in to_count:\n counts[c][r[c]]+=1 \n for s in to_sum:\n sums[s]+= r[s]\n \"\"\"\n \n\n \n\n # html+=\"Counts
\\n\"\n # html+=\"\\n\"\n # for c in counts:\n # html+=\"- %s
\\n\" % c\n # html+=\"\\n\"\n # for (n,v) in counts[c].items():\n # html+=\"- %15s : %10d
\\n\" % ( n, v )\n # html+=\"
\"\n # html+=\"
\"\n # html+=\"\\n\"\n # for (n,v) in sums.items():\n # html+=\"- %15s : %10d
\\n\" % ( n, v )\n # html+=\"
\"\n #\n #\n # html+=\"\"\n # return html\n","sub_path":"mcm/rest_api/DashboardActions.py","file_name":"DashboardActions.py","file_ext":"py","file_size_in_byte":10493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"411052133","text":"#CONFIG.py\n#handles configuration variables\n\nSCREEN_SIZE = width,height = 1024,768\nIMAGE_PATH = 'Pictures/'\nWEAPON_PATH = 'Weapons'\nPIXEL_ARRAY_PATH = 'pixelArrays'\nRED_TEAM_PATH = 'exterminator.txt'\nBLUE_TEAM_PATH = 'team1.txt'\n\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nRED = (255,0,0)\nDARK_RED = (128,0,0)\nORANGE = (255,153,0)\nGREEN = (0,255,0)\nDARK_GREEN = (0,128,0)\nBLUE = (0,0,255)\nDARK_BLUE = (0,0,128)\n\n\nHEAT_LIMIT = 10.0\nBASE_EVASION = 35.0\n\n\n##### WEAPON REGISTER #####\n# weapons must be in this list to be used in the game.\nWEAPON_REGISTER = [None, 'SlugGun', 'MachineGun', 'Mortar', 'LightningGun', 'ArrowVolley', 'MP7', 'ccinator', 'WaterGun', 'PARHANATANK' ]\n\n","sub_path":"CONFIG.py","file_name":"CONFIG.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"127875707","text":"import numpy as np\nfrom scipy import linalg\n\nclass KalmanFilter(object):\n\n def __init__(self,\n state_dim,\n use_last_error=False,\n min_error_init=0.01,\n use_diagonal_approx=True,\n sos_init=10.0,\n error_init=1.0,\n reset_observation_noise=False,\n reset_state=False,\n window_size=20):\n self.state_dim = state_dim\n self.use_last_error = use_last_error\n self.min_error_init = min_error_init\n self.use_diagonal_approx = use_diagonal_approx\n self.sos_init = sos_init\n self.error_init = error_init\n self.reset_observation_noise = reset_observation_noise\n self.reset_state = reset_state\n\n attrs = vars(self)\n print ('KF parameters:' + ', '.join(\"%s: %s\" % item for item in attrs.items()))\n\n self.Pt = None\n self.Rt = None\n self.mean = None\n self.xt = None\n self.xt_old = None\n self.e = None\n\n if self.use_diagonal_approx:\n self.ones = np.ones((self.state_dim, 1))\n else:\n self.I = np.eye(self.state_dim)\n\n # total steps; used for variance and running mean\n self.steps = 0\n\n # windowed mean / var\n self.window_size = window_size\n self.window_index = 0\n self.window_buffer = np.empty((self.window_size, self.state_dim, 1))\n\n def reset(self):\n # set expected error\n if self.use_last_error and self.xt is not None:\n self.e = (self.xt - self.xt_old)\n # if less than threshold, set to minimum preserving sign\n # print (\"ERROR: \", self.e[np.abs(self.e) < self.min_error_init])\n # self.e[np.abs(self.e) < self.min_error_init] = np.sign(self.e[np.abs(self.e) < self.min_error_init]) * self.min_error_init\n else:\n # either xt is None or use_last_error = False\n if self.e is None:\n self.e = np.zeros((self.state_dim, 1))\n self.e.fill(self.error_init)\n\n if self.use_diagonal_approx:\n if self.reset_observation_noise or self.Rt is None:\n self.Rt = np.ones((self.state_dim, 1)) * 10\n expected_error = self.e**2.0 + np.ones((self.state_dim, 1)) * 1e-8 if self.use_last_error and self.Pt is not None else np.ones((self.state_dim, 1)) * self.error_init\n else:\n if self.reset_observation_noise or self.Rt is None:\n self.Rt = np.eye(self.state_dim) * 10\n expected_error = np.dot(self.e, np.transpose(self.e)) + np.eye(self.state_dim) * 1e-8 if self.use_last_error and self.Pt is not None else np.eye(self.state_dim) * self.error_init\n\n self.Pt = expected_error\n if self.reset_observation_noise or self.mean is None or self.sos is None:\n self.mean = np.zeros((self.state_dim, 1))\n self.sos = np.zeros((self.state_dim, 1))\n # initializing this high, gives conservative init\n self.sos.fill(self.sos_init)\n self.steps = 0\n\n # set xt on first step\n if self.reset_state or self.xt is None:\n self.xt = np.zeros((self.state_dim, 1))\n\n # print (\"OLD: \", self.xt_old)\n self.n = 0\n self.do_step = False\n self.xt_old = np.copy(self.xt)\n # self.xt = np.zeros((self.state_dim, 1))\n\n def compute_running_cov(self, y):\n \"\"\"\n See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Covariance\n \"\"\"\n # update mean; do this first to try to get better cov est\n mean_past = self.mean\n self.mean = self.mean + (y - self.mean) / self.n\n # assume n has already been incremented\n if self.n > 1: # don't compute cov when only one sample\n resid1 = y - mean_past # note this is the mean BEFORE seeing the sample y!\n resid2 = y - self.mean\n Y = np.dot(resid2, np.transpose(resid1)) # outer product\n self.Rt = ((self.n - 1) * self.Rt) / self.n + Y / self.n\n\n def update(self, y):\n \"\"\"\n Optimized update\n \"\"\"\n # print (\"Kalman update\")\n # 
always increment n first!\n self.n += 1\n self.steps += 1\n\n if self.use_diagonal_approx:\n mean_past = self.mean.copy()\n if self.steps <= self.window_size:\n # print (\"Not enough samples for window yet\")\n # no subtraction necessary\n self.mean = self.mean + (y - self.mean) / self.steps\n self.sos = self.sos + (y - mean_past) * (y - self.mean)\n # add to buffer\n self.window_buffer[self.window_index] = y.copy()\n # print (self.steps, self.window_index)\n else:\n # print (\"Using windowed update\")\n # remove old sample\n # https://stackoverflow.com/questions/5147378/rolling-variance-algorithm\n # new_mean = mean + (x_new - xs[next_index])/window_size;\n # varSum = var_sum + (x_new - mean) * (x_new - new_mean) - (xs[next_index] - mean) * (xs[next_index] - new_mean);\n\n last_index = (self.window_index+1) % self.window_size\n yold = self.window_buffer[last_index]\n self.mean = self.mean + y / self.window_size - yold / self.window_size\n self.sos = self.sos + (y + yold - mean_past - self.mean) * (y - yold)\n # self.sos = self.sos + (y - mean_past) * (y - self.mean) - (yold - mean_past) * (yold - self.mean)\n # overwrite the oldest element\n self.window_buffer[last_index] = y\n\n self.window_index += 1\n self.window_index %= self.window_size\n\n #print (\"m, sos: \", self.mean.shape, self.sos.shape)\n if self.steps > 1:\n if self.steps <= self.window_size:\n var = self.sos / self.steps #(self.steps-1)\n # print (\"No window var: \", var, self.mean)\n else:\n var = self.sos / self.steps #(self.window_size-1)\n # print (\"Window var: \", var, self.mean)\n # print (\"var: \", var)\n # input(\"\")\n self.Rt = var # leave as vector, makes for easier inversion of diag matrix\n else:\n # import time\n # start = time.time()\n # self.compute_running_cov(y)\n # end = time.time()\n # print (\"Time for cov update: \", (e-s))\n\n mean_past = self.mean.copy()\n if self.steps <= self.window_size:\n # print (\"Not enough samples for window yet\")\n # no subtraction necessary\n self.mean = self.mean + (y - self.mean) / self.steps\n self.sos = self.sos + np.dot((y - mean_past), np.transpose((y - self.mean)))\n # add to buffer\n self.window_buffer[self.window_index] = y.copy()\n # print (self.steps, self.window_index)\n else:\n # print (\"Using windowed update\")\n # remove old sample\n # https://stackoverflow.com/questions/5147378/rolling-variance-algorithm\n # new_mean = mean + (x_new - xs[next_index])/window_size;\n # varSum = var_sum + (x_new - mean) * (x_new - new_mean) - (xs[next_index] - mean) * (xs[next_index] - new_mean);\n\n last_index = (self.window_index+1) % self.window_size\n yold = self.window_buffer[last_index]\n self.mean = self.mean + y / self.window_size - yold / self.window_size\n # self.sos = self.sos + (y + yold - mean_past - self.mean) * (y - yold)\n self.sos = self.sos + np.dot((y - mean_past), np.transpose((y - self.mean))) - np.dot((yold - mean_past), np.transpose((yold - self.mean)))\n # overwrite the oldest element\n self.window_buffer[last_index] = y\n\n self.window_index += 1\n self.window_index %= self.window_size\n\n #print (\"m, sos: \", self.mean.shape, self.sos.shape)\n if self.steps > 1:\n if self.steps <= self.window_size:\n var = self.sos / self.steps #(self.steps-1)\n # print (\"No window var: \", var, self.mean)\n else:\n var = self.sos / self.steps #(self.window_size-1)\n # print (\"Window var: \", var, self.mean)\n # print (\"var: \", var)\n # input(\"\")\n self.Rt = var\n\n Et = y - self.xt\n\n # NOTE: this is being computed properly but was being overwhelmed by 
magnitude of Pt\n if self.use_diagonal_approx:\n # print (self.e, self.Pt, self.Rt)\n # import time\n # start = time.time()\n # import pdb\n # print (self.Pt, self.Rt)\n Kt = self.Pt * 1.0/(self.Pt + self.Rt + 1e-8)\n self.Pt = (self.ones - Kt) * self.Pt\n self.xt = self.xt + Kt * Et\n\n # print (\"pre error: \", self.e)\n self.e = (self.ones - Kt) * self.e\n # print (\"post error: \", self.e, Kt, self.Pt, self.Rt)\n self.Kt = Kt\n # print (Kt, self.xt)\n # print (Et)\n # print (self.Pt)\n # print (self.Rt)\n # input(\"\")\n #\n # if self.n == 10:\n # pdb.set_trace()\n\n # print (self.e, Kt, self.Pt, self.Rt)\n # input(\"\")\n # end = time.time()\n # print (\"Time for kalman update: \", (end-start))\n else:\n # import pdb\n # This may be simply too slow for practical usage...\n # import time\n # start = time.time()\n # print (self.Pt)\n # print (self.Rt)\n # Kt = self.Pt @ linalg.inv(self.Pt + self.Rt + 1e-8)\n Kt = self.Pt @ np.linalg.pinv(self.Pt + self.Rt + 1e-8)\n self.Pt = (self.I - Kt) @ self.Pt\n self.xt = self.xt + Kt @ Et\n self.e = (self.I - Kt) @ self.e\n # if self.n == 2:\n # pdb.set_trace()\n # print (Kt)\n # print (Et)\n # print (self.Pt)\n # print (self.Rt)\n # input(\"\")\n # print (np.linalg.norm(self.e))\n # end = time.time()\n # print (\"Time for kalman update: \", (end-start))\n\n # print (np.linalg.norm(self.e), Kt, self.Pt, self.Rt)\n # input(\"\")\n","sub_path":"rl_adaptive_sampling/backup/opt/kalman_opt.py","file_name":"kalman_opt.py","file_ext":"py","file_size_in_byte":10617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"584696732","text":"from django.conf.urls import url\nfrom .views import post_list, post_create, post_detail, post_update, post_delete\n\n# if there are a lot of views in the app, we will do next:\n# from import views as myapp_views\n# in the patterns we can use the following structure:\n# url(r'^$', myapp_views., name='')\nurlpatterns = [\n\turl(r'^$', post_list, name='list'),\n url(r'^create/$', post_create),\n url(r'^(?P[\\w-]+)/$', post_detail, name='detail'),\n url(r'^(?P[\\w-]+)/edit/$', post_update, name='update'),\n url(r'^(?P[\\w-]+)/delete/$', post_delete),\n]\n\n","sub_path":"blog/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"330658310","text":"import os\n\nHEADER_TYPES = (\".h\", \".hpp\", \".hxx\")\nSOURCE_TYPES = (\".c\", \".cpp\", \".cxx\")\nALL_TYPES = HEADER_TYPES + SOURCE_TYPES\n\ndef make_header(a_directory, a_filename, a_exclude):\n\ta_exclude.add(a_filename)\n\n\tout = open(a_directory + \"/\" + a_filename, \"w\", encoding=\"utf-8\")\n\tout.write(\"#pragma once\\n\")\n\tout.write(\"\\n\")\n\tout.write('#include \"SKSE/Impl/PCH.h\"\\n')\n\tout.write(\"\\n\")\n\n\ttmp = list()\n\tfor dirpath, dirnames, filenames in os.walk(a_directory):\n\t\trem = list()\n\t\tfor dirname in dirnames:\n\t\t\tif dirname in a_exclude:\n\t\t\t\trem.append(dirname)\n\t\tfor todo in rem:\n\t\t\tdirnames.remove(todo)\n\n\t\tfor filename in filenames:\n\t\t\tif filename not in a_exclude and filename.endswith(HEADER_TYPES):\n\t\t\t\tpath = os.path.join(dirpath, filename)\n\t\t\t\ttmp.append(os.path.normpath(path))\n\n\tfiles = list()\n\tfor file in tmp:\n\t\tfiles.append(file.replace(\"\\\\\", \"/\"))\n\n\tfiles.sort()\n\tfor file in files:\n\t\tout.write('#include \"')\n\t\tout.write(file)\n\t\tout.write('\"\\n')\n\ndef make_cmake():\n\ttmp = list()\n\tdirectories = (\"include\", \"src\")\n\tfor directory in directories:\n\t\tfor dirpath, dirnames, filenames in os.walk(directory):\n\t\t\tfor filename in filenames:\n\t\t\t\tif filename.endswith(ALL_TYPES):\n\t\t\t\t\tpath = os.path.join(dirpath, filename)\n\t\t\t\t\ttmp.append(os.path.normpath(path))\n\n\theaders = list()\n\tsources = list()\n\tfor file in tmp:\n\t\tname = file.replace(\"\\\\\", \"/\")\n\t\tif name.endswith(HEADER_TYPES):\n\t\t\theaders.append(name)\n\t\telif name.endswith(SOURCE_TYPES):\n\t\t\tsources.append(name)\n\n\tdef do_make(a_filename, a_varname, a_files):\n\t\tout = open(\"cmake/\" + a_filename + \".cmake\", \"w\", encoding=\"utf-8\")\n\t\tout.write(\"set(\" + a_varname + \" ${\" + a_varname + \"}\\n\")\n\n\t\tfor file in a_files:\n\t\t\tout.write(\"\\t\" + file + \"\\n\")\n\n\t\tout.write(\")\\n\")\n\n\tdo_make(\"headerlist\", \"headers\", headers)\n\tdo_make(\"sourcelist\", \"sources\", sources)\n\ndef main():\n\tcur = os.path.dirname(os.path.realpath(__file__))\n\tos.chdir(cur)\n\tmake_cmake()\n\n\tos.chdir(cur + \"/include\")\n\tmake_header(\"SKSE\", \"SKSE.h\", {\"Impl\"})\n\tmake_header(\"RE\", \"Skyrim.h\", {\"BSCoreTypes.h\", \"Offsets.h\", \"Offsets_NiRTTI.h\", \"Offsets_RTTI.h\", \"SFTypes.h\"})\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"ProjectGen.py","file_name":"ProjectGen.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"372684199","text":"# -*- coding: utf-8 -*- #\n# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests of api_lib dataproc util methods.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom apitools.base.py import encoding\n\nfrom googlecloudsdk import calliope\n\nfrom googlecloudsdk.api_lib.dataproc import dataproc as dp\nfrom googlecloudsdk.api_lib.dataproc import exceptions\nfrom googlecloudsdk.api_lib.dataproc import util\nfrom tests.lib import sdk_test_base\nfrom tests.lib.surface.dataproc import base\nfrom tests.lib.surface.dataproc import unit_base\n\n\nclass UtilUnitTest(unit_base.DataprocUnitTestBase):\n \"\"\"Tests for dataproc util.\"\"\"\n\n def SetUp(self):\n self.dataproc_mock = dp.Dataproc(self.track)\n self.dataproc_mock._client = self.mock_client\n self.dataproc_mock._messages = self.messages\n\n\nclass UtilUnitTestBeta(UtilUnitTest, base.DataprocTestBaseBeta):\n\n def testBeta(self):\n self.assertEqual(self.messages, self._beta_messages)\n self.assertEqual(self.track, calliope.base.ReleaseTrack.BETA)\n\n def testPrintWorkflowMetadata(self):\n metadata = self.messages.WorkflowMetadata(\n template='test-template',\n state=self.messages.WorkflowMetadata.StateValueValuesEnum.RUNNING,\n createCluster=self.messages.ClusterOperation(operationId='create-id'),\n deleteCluster=self.messages.ClusterOperation(operationId='delete-id'),\n graph=self.messages.WorkflowGraph(nodes=[\n self.messages.WorkflowNode(\n jobId='job-id-1',\n stepId='001',\n state=self.messages.WorkflowNode.StateValueValuesEnum.RUNNING)\n ]))\n operations = {'createCluster': None, 'deleteCluster': None}\n status = {}\n errors = {}\n util.PrintWorkflowMetadata(metadata, status, operations, errors)\n self.assertEqual(operations['createCluster'], metadata.createCluster)\n self.assertEqual(operations['deleteCluster'], metadata.deleteCluster)\n self.assertTrue(not errors) # no errors\n self.assertEqual(status['job-id-1'],\n self.messages.WorkflowNode.StateValueValuesEnum.RUNNING)\n self.assertEqual(\n status['wt'],\n self.messages.WorkflowMetadata.StateValueValuesEnum.RUNNING)\n\n def testPrintWorkflowMetadataErrors(self):\n metadata = self.messages.WorkflowMetadata(\n template='test-template',\n state=self.messages.WorkflowMetadata.StateValueValuesEnum.RUNNING,\n createCluster=self.messages.ClusterOperation(\n operationId='create-id', error='create-error'),\n deleteCluster=self.messages.ClusterOperation(\n operationId='delete-id', error='delete-error'),\n graph=self.messages.WorkflowGraph(nodes=[\n self.messages.WorkflowNode(\n jobId='job-id-1',\n stepId='001',\n error='job-error',\n state=self.messages.WorkflowNode.StateValueValuesEnum.FAILED)\n ]))\n operations = {'createCluster': None, 'deleteCluster': None}\n status = {}\n errors = {}\n util.PrintWorkflowMetadata(metadata, status, operations, errors)\n self.assertEqual(operations['createCluster'], metadata.createCluster)\n 
self.assertEqual(operations['deleteCluster'], metadata.deleteCluster)\n self.assertEqual(errors['job-id-1'], 'job-error') # no errors\n self.assertEqual(status['job-id-1'],\n self.messages.WorkflowNode.StateValueValuesEnum.FAILED)\n\n def testWaitForWorkflowTemplateOperation(self):\n expected = self.MakeCompletedOperation()\n self.ExpectGetOperation()\n self.ExpectGetOperation()\n self.ExpectGetOperation(operation=expected)\n result = util.WaitForWorkflowTemplateOperation(self.dataproc_mock,\n self.MakeOperation(), 10, 0)\n self.assertEqual(result, expected)\n\n def testWaitForWorkflowTemplateOperationTimeout(self):\n operation = self.MakeOperation()\n exception_message = 'Operation [{0}] timed out.'.format(operation.name)\n with self.AssertRaisesExceptionMatches(exceptions.OperationTimeoutError,\n exception_message):\n util.WaitForWorkflowTemplateOperation(self.dataproc_mock, operation, 0, 1)\n\n def testWaitForWorkflowTemplateOperationError(self):\n operation = self.MakeOperation()\n rpc_error = self.MakeRpcError()\n self.ExpectGetOperation()\n self.ExpectGetOperation()\n self.ExpectGetOperation(operation=self.MakeCompletedOperation(\n error=rpc_error))\n exception_message = 'Operation [{0}] failed: {1}.'.format(\n operation.name, util.FormatRpcError(rpc_error))\n with self.AssertRaisesExceptionMatches(exceptions.OperationError,\n exception_message):\n util.WaitForWorkflowTemplateOperation(self.dataproc_mock, operation, 10,\n 0)\n\n def testWaitForWorkflowTemplateOperationCreateClusterError(self):\n operation = self.MakeCompletedOperation()\n operation = self.MakeCompletedOperation(\n createCluster={'error': 'create error.',\n 'operationId': 'test id'})\n self.ExpectGetOperation()\n self.ExpectGetOperation()\n self.ExpectGetOperation(operation=operation)\n exception_message = 'Operation [{0}] failed: {1}.'.format(\n 'test id', 'create error')\n with self.AssertRaisesExceptionMatches(exceptions.OperationError,\n exception_message):\n util.WaitForWorkflowTemplateOperation(self.dataproc_mock, operation, 10,\n 0)\n\n def testWaitForWorkflowTemplateOperationDeleteClusterError(self):\n operation = self.MakeCompletedOperation()\n operation = self.MakeCompletedOperation(\n deleteCluster={'error': 'delete error.',\n 'operationId': 'test id'})\n self.ExpectGetOperation()\n self.ExpectGetOperation()\n self.ExpectGetOperation(operation=operation)\n exception_message = 'Operation [{0}] failed: {1}.'.format(\n 'test id', 'delete error')\n with self.AssertRaisesExceptionMatches(exceptions.OperationError,\n exception_message):\n util.WaitForWorkflowTemplateOperation(self.dataproc_mock, operation, 10,\n 0)\n\n\nif __name__ == '__main__':\n sdk_test_base.main()\n","sub_path":"google-cloud-sdk/lib/tests/unit/api_lib/dataproc/util_test.py","file_name":"util_test.py","file_ext":"py","file_size_in_byte":6960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"321078764","text":"import json\nimport sys\nimport tempfile\n\nfrom distriopt import VirtualNetwork\nfrom distriopt.constants import *\nfrom distriopt.embedding import PhysicalNetwork\nfrom distriopt.embedding.algorithms import (\n EmbedBalanced,\n EmbedILP,\n EmbedPartition,\n EmbedGreedy,\n)\n\nMAPPERS={\"EmbedBalanced\":EmbedBalanced, \"EmbedILP\":EmbedILP, \"EmbedPartition\":EmbedPartition, \"EmbedGreedy\":EmbedGreedy}\n\ndef main():\n virtual_topo_file = sys.argv[1]\n physical_topo_file = sys.argv[2]\n mapper = sys.argv[3]\n\n virtual = VirtualNetwork.from_file(virtual_topo_file)\n physical = PhysicalNetwork.from_files(physical_topo_file)\n if mapper not in MAPPERS:\n raise ValueError(f\"{mapper} not in {MAPPERS}\")\n algo= MAPPERS[mapper]\n prob = algo(virtual, physical)\n time_solution, status = prob.solve()\n temp = tempfile.NamedTemporaryFile(delete=False)\n try:\n if status==1:\n # problem solved\n with open(temp.name,\"w\") as f:\n json.dump({\"mapping\":prob.solution.node_mapping},f)\n else:\n with open(temp.name, \"w\") as f:\n json.dump({\"Infeasible\": None}, f)\n\n\n print(temp.name, end=\"\")\n except:\n raise RuntimeError( \"problem in distrinet_runner.py\")\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"MaxiNet/Frontend/distriopt_runner.py","file_name":"distriopt_runner.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"467810127","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom functools import lru_cache\n\ndef synthetic_data_set():\n data = np.loadtxt('../data/classification.txt')\n\n X = np.array([[1, *x[:2]] for x in data])\n T = np.array([[int(x[2]), int(not x[2])] for x in data])\n\n return X, T\n\ndef old_faithful():\n data = np.loadtxt('../data/faithful.txt')\n\n X = np.array([[1, *x] for x in data])\n T = np.array([[int(x[0] < 3), int(not x[0] < 3)] for x in data])\n\n return X, T\n\ndef least_square(X, T):\n W = np.linalg.inv(np.dot(X.T, X)).dot(X.T).dot(T)\n return lambda x: ((W[1, 1] - W[1, 0]) * x + W[0, 1] - W[0, 0]) / (W[2, 0] - W[2, 1])\n\ndef logistic_sigmoid(a):\n return 1 / (1 + np.exp(-a))\n\ndef logistic(X, T):\n t = T.T[1]\n\n w = np.zeros(3)\n while 1:\n w_old = w\n\n Phi = X\n R = np.zeros((len(X), len(X)))\n y = np.zeros(len(X))\n for n in range(len(X)):\n y_n = logistic_sigmoid(np.dot(w_old.T, Phi[n]))\n y[n] = y_n\n R[n, n] = y[n] * (1 - y[n])\n\n z = np.dot(Phi, w_old) - np.linalg.inv(R).dot(y - t)\n w = np.linalg.inv(Phi.T.dot(R).dot(Phi)).dot(Phi.T).dot(R).dot(z)\n\n if np.linalg.norm(w_old) != 0 and np.linalg.norm(w - w_old) / np.linalg.norm(w_old) < 0.01:\n break\n\n return lambda x: (-w[1] * x - w[0]) / w[2]\n\nif __name__ == '__main__':\n fig, axes = plt.subplots(1, 2, figsize=(12, 6))\n\n X, T = synthetic_data_set()\n\n axes[0].set_title(\"synthetic data set\")\n axes[0].set_xlim(-3, 3)\n axes[0].set_ylim(-3, 3)\n axes[0].plot(X[T.T[0] == 1].T[1], X[T.T[0] == 1].T[2], 'bo')\n axes[0].plot(X[T.T[1] == 1].T[1], X[T.T[1] == 1].T[2], 'rx')\n\n f = least_square(X, T)\n axes[0].plot([-3, 3], [f(-3), f(3)], 'm-')\n\n f = logistic(X, T)\n axes[0].plot([-3, 3], [f(-3), f(3)], 'g-')\n\n\n X, T = old_faithful()\n\n axes[1].set_title(\"old faithful\")\n axes[1].set_xlim(1,6)\n axes[1].set_ylim(40, 100)\n axes[1].plot(X[T.T[0] == 1].T[1], X[T.T[0] == 1].T[2], 'bo')\n axes[1].plot(X[T.T[1] == 1].T[1], X[T.T[1] == 1].T[2], 'rx')\n\n f = least_square(X, T)\n axes[1].plot([1, 6], [f(1), f(6)], 'm-')\n\n # f = logistic(X, T)\n # axes[1,1].plot([-3, 3], [f(-3), f(3)], 'k-')\n\n plt.show()\n plt.close()\n","sub_path":"c4/src/fig4-4.py","file_name":"fig4-4.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"479553408","text":"# File Name: spider_yield_from.py\n\nimport time\nimport os\nimport socket\nfrom urllib.parse import urlparse\nfrom selectors import DefaultSelector, EVENT_WRITE, EVENT_READ \n\nselector = DefaultSelector()\nstopped = False\n\nurls = ['https://dn-simplecloud.shiyanlou.com/ncn1.jpg',\n 'https://dn-simplecloud.shiyanlou.com/ncn110.jpg',\n 'https://dn-simplecloud.shiyanlou.com/ncn109.jpg',\n 'https://dn-simplecloud.shiyanlou.com/1548126810319.png',\n 'https://dn-simplecloud.shiyanlou.com/1517282865454.png',\n 'https://dn-simplecloud.shiyanlou.com/1543913883545.png',\n 'https://dn-simplecloud.shiyanlou.com/1502778396172.png',\n 'https://dn-simplecloud.shiyanlou.com/1540965522764.png',\n 'https://dn-simplecloud.shiyanlou.com/1546500900109.png',\n 'https://dn-simplecloud.shiyanlou.com/1547620906601.png'\n]\n\n\nclass Future:\n def __init__(self):\n self.value = None\n self._step_func = []\n\n def add_step_func(self, func):\n self._step_func.append(func)\n\n def set_value(self, value):\n self.value = value\n for func in self._step_func:\n func(self)\n\n # 实现 __iter__ 方法,Future 类的实例为可迭代对象\n def __iter__(self):\n # 该语句起到暂停协程的作用,并返回实例本身\n yield self \n # 该语句定义的返回值会赋给 yield from 语句等号前面的变量\n return self.value\n\n\n# AsyncSocket 类封装套接字,该类的实例拥有套接字的各种接口\n# 因为主要方法都是协程函数,所以该类以 Async 作为标识\nclass AsyncSocket:\n def __init__(self):\n self.sock = socket.socket()\n self.sock.setblocking(False)\n\n # 该方法用于向服务器发送连接请求并注册监听套接字的可写事件\n def connect(self, address):\n f = Future()\n try:\n self.sock.connect(address)\n except BlockingIOError:\n pass\n # 这是回调函数,服务器与客户端连接成功后自动执行\n def writable():\n f.set_value(None)\n # 注册监听客户端套接字的可写事件\n selector.register(self.sock.fileno(), EVENT_WRITE, writable)\n # 可迭代对象 f 为 Future 类的实例\n # 执行此行代码,程序会运行到 f.__iter__ 方法的 yield 语句处暂停\n # 将 yield 后面的对象返回给调用者并赋值给 step 方法内的 new_future 变量\n yield from f\n selector.unregister(self.sock.fileno())\n\n # 向服务器发送获取图片的请求\n def send(self, data):\n self.sock.send(data)\n\n # 该方法会多次执行,以获取服务器返回的数据片段\n def read(self):\n f = Future()\n # 这是回调函数,收到服务器传回的数据时自动运行\n def readable():\n f.set_value(self.sock.recv(4096))\n # 注册监听客户端套接字的可读事件\n selector.register(self.sock.fileno(), EVENT_READ, readable)\n # f.__iter__ 方法的返回值会赋值给 value 变量\n value = yield from f\n selector.unregister(self.sock.fileno())\n return value\n\n # 关闭客户端套接字\n def close(self):\n self.sock.close()\n\n\n# 爬虫类,该类的实例的 fetch 方法用于处理数据\n# 期间会调用 AsyncSocket 类的实例来完成数据获取的工作\nclass Crawler:\n def __init__(self, url):\n self._url = url\n self.url = urlparse(url)\n self.response = b''\n\n def fetch(self):\n # 将此变量设为全局变量,以便在函数内部修改\n global stopped\n self.time = time.time()\n # AsyncSocket 类的实例对象负责完成数据获取的工作\n sock = AsyncSocket()\n # 向服务器发送连接请求,协程会暂停到嵌套协程中的某个 yield 处\n yield from sock.connect((self.url.netloc, 80))\n data = 'GET {} HTTP/1.1\\r\\nHost: {}\\r\\nConnection: close\\r\\n\\r\\n \\\n '.format(self.url.path, self.url.netloc)\n sock.send(data.encode())\n # 不断循环以读取服务器返回的数据片段,直到数据返回空\n while True:\n # sock.read 方法会调用 Future 类的实例的 __iter__ 方法\n # __iter__ 方法的 return 值即服务器返回的数据片段会赋给 value 变量\n value = yield from sock.read()\n if value:\n self.response += value\n else:\n sock.close()\n with open('pic' + self.url.path, 'wb') as file:\n file.write(self.response.split(b'\\r\\n\\r\\n')[1])\n print(\"URL: {0}, 耗时: {1:.3f}s\".format(\n self._url, time.time() - self.time))\n urls.remove(self._url)\n if not urls:\n stopped = True\n break\n\n\nclass Task:\n def __init__(self, coro):\n self.coro = coro\n f = Future()\n self.step(f)\n\n def step(self, future):\n try:\n new_futrue = 
self.coro.send(future.value)\n except StopIteration:\n return\n new_futrue.add_step_func(self.step)\n\n\ndef loop():\n while not stopped:\n events = selector.select()\n for event_key, _ in events:\n callback = event_key.data\n callback()\n\n\ndef main():\n os.system('mkdir -p pic')\n start = time.time()\n for url in urls:\n crawler = Crawler(url)\n Task(crawler.fetch())\n loop()\n print(\"总共耗时: {:.3f}s\".format(time.time() - start))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"异步爬虫实现/spider_yield_from.py","file_name":"spider_yield_from.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"649076619","text":"class ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def hasCycle(self, head):\n if not head or not head.next:\n return False\n slow = head\n fast = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n if slow is fast:\n return True\n return False\n\n def hasCycle1(self, head):\n start = head\n while start:\n start = start.next\n if start == head:\n return True\n return False\n#主函数\nif __name__ == \"__main__\":\n node1 = ListNode(-21)\n node2 = ListNode(10)\n node3 = ListNode(4)\n node4 = ListNode(5)\n node1.next = node2\n node2.next = node3\n node3.next = node4\n node4.next = node3\n # 创建对象\n solution = Solution()\n print(\"初始化的值是:\", [node1.val, node2.val, node3.val, node4.val])\n print(\"结果是:\", solution.hasCycle(node1))","sub_path":"数据结构练习/Python算法指南数据结构/121_带环链表.py","file_name":"121_带环链表.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"651486154","text":"from __future__ import division\nfrom collections import OrderedDict\nimport json\nimport OreCalculator\n\n\ndef ore(inputs, query_result_list):\n ore_class_list = []\n class_inputs = exp_duration_handler(inputs)\n\n for query in query_result_list:\n \"\"\"\n Loop over each \"row\" of the spreadsheet; this is determined by the SQLite query\n from user's inputs on the Exposure Scenario tab.\n \"\"\"\n #\n\n # NonCancerInputs\n activity = query['Activity']\n formulation = query['Formulation']\n app_equip = query['AppEquip']\n app_type = query['AppType']\n crop_target = query['Category']\n crop_name = inputs['exp_crop']\n app_rate = inputs['app_rate']['app_rate_' + query['Formulation']]\n app_rate_unit = query['AppRateUnit']\n area_treated = query['TreatedVal']\n area_treated_unit = query['TreatedUnit']\n active_ingredient = inputs['activeIngredient']\n\n # DermalNonCancer specific inputs\n abs_frac_dermal = class_inputs['dermal']['abs_frac']\n bw_dermal = class_inputs['dermal']['bw_adult']\n pod_dermal = class_inputs['dermal']['nc_POD']\n loc_dermal = class_inputs['dermal']['nc_LOC']\n # Dermal PPE (personal protection equipment)\n dermal_unit_exp_sl_no_G = query['DUESLNoG']\n dermal_unit_exp_sl_G = query['DUESLG']\n dermal_unit_exp_dl_g = query['DUEDLG']\n dermal_unit_exp_sl_G_crh = query['DUESLGCRH']\n dermal_unit_exp_dl_G_crh = query['DUEDLGCRH']\n dermal_unit_exp_ec = query['DUEEC']\n\n # InhalNonCancer specific inputs\n abs_frac_inhal = class_inputs['inhal']['abs_frac']\n bw_inhal = class_inputs['inhal']['bw_adult']\n pod_inhal = class_inputs['inhal']['nc_POD']\n loc_inhal = class_inputs['inhal']['nc_LOC']\n # Inhalation PPE (personal protection equipment)\n inhal_unit_exp_no_r = query['IUENoR']\n inhal_unit_exp_pf5r = query['IUEPF5R']\n inhal_unit_exp_pf10r = query['IUEPF10R']\n inhal_unit_exp_ec = query['IUEEC']\n\n # Source Columns\n sources = [query['SourceCategory'], query['SourceMRID'], query['SourceDescription'], query['SourceDER']]\n # source_category = query['SourceCategory']\n # source_mrid = query['SourceMRID']\n # source_description = query['SourceDescription']\n # source_der = query['SourceDER']\n\n # Create DermalNonCancer class instance\n dermal = OreCalculator.DermalNonCancer(\n activity, crop_target, app_rate, app_rate_unit, crop_name,\n loc_dermal, loc_inhal, area_treated, area_treated_unit, active_ingredient,\n formulation, app_equip, app_type,\n abs_frac_dermal, bw_dermal, pod_dermal,\n dermal_unit_exp_sl_no_G, dermal_unit_exp_sl_G, dermal_unit_exp_dl_g,\n dermal_unit_exp_sl_G_crh, dermal_unit_exp_dl_G_crh, dermal_unit_exp_ec\n )\n\n # Create InhalNonCancer class instance\n inhal = OreCalculator.InhalNonCancer(\n activity, crop_target, app_rate, app_rate_unit, crop_name,\n loc_dermal, loc_inhal, area_treated, area_treated_unit, active_ingredient,\n formulation, app_equip, app_type,\n abs_frac_inhal, bw_inhal, pod_inhal,\n inhal_unit_exp_no_r, inhal_unit_exp_pf5r, inhal_unit_exp_pf10r, inhal_unit_exp_ec\n )\n\n # Combined results?\n if inputs['expComboType'] != '1':\n if inputs['expComboType'] == '2': # Combined: Additive Dose\n combined = OreCalculator.CombinedDose(dermal, inhal).additive_dose()\n if inputs['expComboType'] == '3': # Combined: 1/MOE Approach\n combined = OreCalculator.CombinedDose(dermal, inhal).one_over_moe()\n if inputs['expComboType'] == '4': # Aggregate Risk Index\n combined = OreCalculator.CombinedDose(dermal, inhal).ari()\n ore_class_list.append((dermal, inhal, sources, combined))\n\n else: # Not combined\n 
ore_class_list.append((dermal, inhal, sources))\n\n ore_output = OreOutputFormatter(ore_class_list)\n output_dict = ore_output.get_output_dict()\n #\n\n return output_dict\n\n\ndef exp_duration_handler(inputs):\n \"\"\"\n Helper method to handle the Short, Intermediate, and Long term options\n\n ONLY SHORT TERM IS CURRENTLY ALLOWED ON THE FRONTEND\n\n :param inputs: dict\n :return: float\n \"\"\"\n\n class_inputs = {'dermal': {}, 'inhal': {}}\n type = '_st'\n if inputs['expDurationType_st']:\n\n type = '_st'\n if inputs['expDurationType_it']:\n\n type = '_it'\n if inputs['expDurationType_lt']:\n\n type = '_lt'\n\n class_inputs['dermal']['abs_frac'] = float(inputs['dermal_abs_frac' + type]) / 100.\n class_inputs['dermal']['bw_adult'] = inputs['bw_dermal_NC' + type]\n class_inputs['dermal']['nc_POD'] = inputs['dermal_NC_POD' + type]\n class_inputs['dermal']['nc_LOC'] = inputs['dermal_NC_LOC' + type]\n\n class_inputs['inhal']['abs_frac'] = float(inputs['inhalation_abs_frac' + type]) / 100.\n class_inputs['inhal']['bw_adult'] = inputs['bw_inhalation_NC' + type]\n class_inputs['inhal']['nc_POD'] = inputs['inhalation_NC_POD' + type]\n class_inputs['inhal']['nc_LOC'] = inputs['inhalation_NC_LOC' + type]\n\n return class_inputs\n\n\nclass OreOutputFormatter(object):\n def __init__(self, ore_class_list):\n \"\"\"\n [\n (, ),\n (, ),\n (, )\n ]\n \"\"\"\n\n self.dermal_class_list = []\n self.inhal_class_list = []\n self.sources_list = []\n self.combined_list = []\n\n for item in ore_class_list:\n self.dermal_class_list.append(item[0])\n self.inhal_class_list.append(item[1])\n self.sources_list.append(item[2])\n try:\n self.combined_list.append(item[3])\n except IndexError:\n pass\n\n self.output_dict = {}\n\n \"\"\" Example JSON schema:\n [\n 'mix_loader': {\n 'activity': \"M/L\",\n 'app_equip': 'Aerial',\n 'crop_target': \"Corn[field crop, high acreage]\",\n 'loc': {'dermal': '100', 'inhal': '100'},\n 'app_rate': '2',\n 'app_rate_unit': 'lb ai/A',\n 'area_treated': '1200',\n 'area_treated_unit': 'acre',\n 'dermal_unit_exp': ['220 [SL/No G]', '37.6 [SL/G]'],\n 'inhal_unit_exp': ['0.219 [No-R]', '0.219 [No-R]'],\n 'dermal_dose': ['1.65', '0.282'],\n 'dermal_moe': ['30', '180'],\n 'inhal_dose': ['0.00658', '0.00658'],\n 'inhal_moe': ['3800', '3800']\n },\n 'applicator': {\n 'activity': \"Aerial\",\n 'app_equip': 'Aerial',\n 'crop_target': \"Corn[field crop, high acreage]\",\n 'loc': {'dermal': '100', 'inhal': '100'},\n 'app_rate': '2',\n 'app_rate_unit': 'lb ai/A',\n 'area_treated': '1200',\n 'area_treated_unit': 'acre',\n 'dermal_unit_exp': ['2.06 [EC]'],\n 'inhal_unit_exp': ['0.043 [EC]'],\n 'dermal_dose': ['0.0156'],\n 'dermal_moe': ['3200'],\n 'inhal_dose': ['0.000148'],\n 'inhal_moe': ['170000']\n },\n 'flagger': {\n 'activity': \"Flagger\",\n 'app_equip': 'Aerial',\n 'crop_target': \"Corn[field crop, high acreage]\",\n 'loc': {'dermal': '100', 'inhal': '100'},\n 'app_rate': '2',\n 'app_rate_unit': 'lb ai/A',\n 'area_treated': '350',\n 'area_treated_unit': 'acre',\n 'dermal_unit_exp': ['11 [EC]'],\n 'inhal_unit_exp': ['0.35 [No-R]'],\n 'dermal_dose': ['0.0156'],\n 'dermal_moe': ['3200'],\n 'inhal_dose': ['0.000148'],\n 'inhal_moe': ['170000']\n }\n ]\n \"\"\"\n\n def get_output_dict(self):\n if len(self.output_dict) > 0:\n return self.output_dict\n else:\n self.dermal_formatter()\n self.inhal_formatter()\n self.sources_formatter()\n if len(self.combined_list) > 0:\n self.combined_formatter()\n\n return self.output_dict\n\n def dermal_formatter(self):\n \"\"\"\n Create shared inputs portion of 
output_dict for a row of results on Output page\n \"\"\"\n # Loop over the DermalNonCancer instances\n i = 1\n for exp_scenario in self.dermal_class_list: # Could be either class instances list, these are the shared inputs\n\n attr_dict = exp_scenario.ordered_dict(exp_scenario.get_ppe_increasing_order())\n #\n\n dermal_dict = {}\n\n dermal_unit_exp = []\n dermal_exp = []\n dermal_dose = []\n dermal_moe = []\n for k, v in attr_dict.items():\n\n if isinstance(attr_dict[k], OreCalculator.OreCalculator):\n # Attributes have been ordered by PPE to match the logic of the calculator\n #\n dermal_unit_exp.append(str(attr_dict[k].unit_exp) + \" [\" + k.upper() + \"]\")\n dermal_exp.append(str(attr_dict[k].exposure_conc))\n dermal_dose.append(str(attr_dict[k].dose_conc))\n dermal_moe.append(str(attr_dict[k].moe))\n elif attr_dict[k] != None and attr_dict[k] != \"No Data\":\n #\n dermal_dict[k] = attr_dict[k]\n\n dermal_dict['dermal_unit_exp'] = dermal_unit_exp\n dermal_dict['dermal_exp'] = dermal_exp\n dermal_dict['dermal_dose'] = dermal_dose\n dermal_dict['dermal_moe'] = dermal_moe\n\n # self.output_dict[exp_scenario.activity] = dermal_dict\n self.output_dict[str(i)] = dermal_dict\n i += 1\n\n def inhal_formatter(self):\n\n # Loop over the InhalNonCancer instances\n i = 1\n for exp_scenario in self.inhal_class_list:\n\n attr_dict = exp_scenario.ordered_dict(exp_scenario.get_ppe_increasing_order())\n #\n\n inhal_dict = {}\n\n inhal_unit_exp = []\n inhal_exp = []\n inhal_dose = []\n inhal_moe = []\n for k, v in attr_dict.items():\n if isinstance(attr_dict[k], OreCalculator.OreCalculator):\n # Attributes have been ordered by PPE to match the logic of the calculator\n #\n inhal_unit_exp.append(str(attr_dict[k].unit_exp) + \" [\" + k.upper() + \"]\")\n inhal_exp.append(str(attr_dict[k].exposure_conc))\n inhal_dose.append(str(attr_dict[k].dose_conc))\n inhal_moe.append(str(attr_dict[k].moe))\n\n inhal_dict['inhal_unit_exp'] = inhal_unit_exp\n inhal_dict['inhal_exp'] = inhal_exp\n inhal_dict['inhal_dose'] = inhal_dose\n inhal_dict['inhal_moe'] = inhal_moe\n\n # self.output_dict[exp_scenario.activity].update(inhal_dict)\n self.output_dict[str(i)].update(inhal_dict)\n i += 1\n\n def combined_formatter(self):\n i = 1\n for combined in self.combined_list:\n\n\n self.output_dict[str(i)].update(combined)\n i += 1\n\n def sources_formatter(self):\n i = 1\n for sources in self.sources_list:\n sources_dict = {\n 'source': {\n 'category': sources[0],\n 'mrid': sources[1],\n 'description': sources[2],\n 'der': sources[3],\n }\n }\n self.output_dict[str(i)].update(sources_dict)\n i += 1\n","sub_path":"REST_UBER/ore_rest/ore_rest_model.py","file_name":"ore_rest_model.py","file_ext":"py","file_size_in_byte":12037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"99586925","text":"import unittest\n\ndef MergeSort(arr):\n\n if len(arr) > 1:\n mid = (len(arr))//2\n MergeSort(arr[:mid])\n MergeSort(arr[mid:])\n Merge(arr,mid)\n\n return arr\n\ndef Merge(arr, mid):\n\n left = arr[:mid]\n right = arr[mid:]\n n1 = len(left)\n n2 = len(right)\n\n i = 0\n j = 0\n k = 0\n while i < n1 and j < n2:\n if i != n1 and left[i] <= right[j]:\n arr[k] = left[i]\n i +=1\n else:\n arr[k] = right[j]\n j +=1\n k +=1\n\n \"\"\"\n There is a chance that all the elements from the right array are copied\n to the result array and there are few elements in left array remaining to\n be copied. The following condition helps to copy the remaining elements\n from the left array.\n\n For Example lets take the input array : [2,4,5,7,1,2,3,6]\n And lets see when we are about to merge the first 4 and last 4 elements\n\n i.e.\n left array = [2, 4, 5, 7]\n right array = [1, 2, 3, 6]\n\n When the above loop is done all the elements from the right array are copied\n and element \"7\" from the left array remains. So the below loop will process\n that element\n \"\"\"\n while i < n1:\n arr[k] = left[i]\n i +=1\n k +=1\n\n \"\"\"\n There is a chance that all the elements from the left array are copied\n to the result array and there are few elements in right array remaining to\n be copied. The following condition helps to copy the remaining elements\n from the right array.\n \"\"\"\n while j < n2:\n arr[k] = right[j]\n j +=1\n k +=1\n\n\n\nclass testMsort(unittest.TestCase):\n def test_True(self):\n self.assertEqual(MergeSort([2,4,5,7,1,2,3,6]),[1,2,2,3,4,5,6,7])\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"648670301","text":"import numpy as np\nimport argparse, os\nfrom KMeans_mod.data_helper import json_read\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import AffinityPropagation\nfrom sklearn.cluster import MeanShift\nfrom sklearn.cluster import SpectralClustering\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn import metrics as mr\nfrom gensim import models\nfrom gensim import corpora\nfrom sklearn.externals import joblib\n\nparser = argparse.ArgumentParser(description='BiLSTM-CRF for Chinese NER task')\nparser.add_argument('--embedding_dim', type=int, default=32, help='dimension of embedding matrix')\nargs = parser.parse_args()\n\ndata, labels = json_read('./Tweets.txt')\ndata_new = []\nfor i in data:\n\twhile len(i) < 20:\n\t\ti.append('')\n\t# print(i)\n\tdata_new.append(i)\ndictionary = corpora.Dictionary(data_new)\ncorpus = [dictionary.doc2bow(text) for text in data_new]\n# print(corpus)\nmodel = models.TfidfModel(corpus)\nmodel.save(\"my_model.tfidf\")\n\nmodel = models.TfidfModel.load('my_model.tfidf')\n\ntfidf_vec = []\nfor i in range(len(corpus)):\n\tstring = corpus[i]\n\tstring_tfidf = model[string]\n\ttfidf_vec.append(string_tfidf)\n# print(tfidf_vec)\n\ntf_idf = []\nfor i in tfidf_vec:\n\tli = [0]*5099\n\tfor j in i:\n\t\t# print(j[0])\n\t\tif len(j) < 2:continue\n\t\tli[j[0]] = j[1]\n\t# while len(temp)<20:\n\t# \ttemp.append(0)\n\t# print(li)\n\ttf_idf.append(li)\n# wordvector = np.array(wordvector).reshape([2472,640])\ntf_idf = (np.array(tf_idf))\n# print(int(len(tf_idf)*0.8))\n# train, val = tf_idf[:int(len(tf_idf)*0.8)], tf_idf[int(len(tf_idf)*0.8):]\n# label_train, label_test = labels[:int(len(tf_idf)*0.8)], labels[int(len(tf_idf)*0.8):]\n\nkmeans = KMeans(n_clusters=89)\ns = kmeans.fit_predict(tf_idf)\nprint('NML of KMeans:' + str(mr.normalized_mutual_info_score(labels, s)))\n\nAf = AffinityPropagation(preference=-50).fit_predict(tf_idf)\nprint('NML of AffinityPropagation:' + str(mr.normalized_mutual_info_score(labels, Af)))\n\nms = MeanShift(n_jobs=8).fit_predict(tf_idf)\nprint('NML of MeanShift:' + str(mr.normalized_mutual_info_score(labels, ms)))\n\nsc = SpectralClustering(n_clusters=89).fit_predict(tf_idf)\nprint('NML of SpectralClustering:' + str(mr.normalized_mutual_info_score(labels, sc)))\n\nac = AgglomerativeClustering(n_clusters=89).fit_predict(tf_idf)\nprint('NML of Ward hierarchical clustering:' + str(mr.normalized_mutual_info_score(labels, ac)))\n\ndb = DBSCAN().fit_predict(tf_idf)\nprint('NML of DBSCAN:' + str(mr.normalized_mutual_info_score(labels, db)))\n#\ngm = GaussianMixture(n_components=10).fit_predict(tf_idf)\nprint('NML of GaussianMixture:' + str(mr.normalized_mutual_info_score(labels, gm)))","sub_path":"KMeans_mod/Kmeans.py","file_name":"Kmeans.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"505721568","text":"### Example 9-15: Reading Rebase data into a reversed dictionary\n###\n### See cgi/enzymes_for_site.py for the full example combining\n### Examples 9-15 through 18\n\ndef read_table(filename):\n table = {}\n linenum = 0\n with open(filename) as fil:\n for line in fil:\n linenum += 1\n enzyme, sequence = line.split()\n sequence = sequence.replace('^', '') # ignore cut sites\n if sequence in table:\n table[sequence].add(enzyme)\n else:\n table[sequence] = {enzyme} # first enzyme for sequence\n table.get(sequence, set()).add(enzyme)\n print(linenum)\n return table\n\nif __name__ == '__main__':\n tbl = read_table('../data/rebase-simple-table.txt')\n print(len(tbl))\n\n","sub_path":"chapter_examples/ch09_15.py","file_name":"ch09_15.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"99750122","text":"from django.utils import timezone\nfrom datetime import datetime\n\nfrom rest_framework import viewsets, response, status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.mixins import ListModelMixin, UpdateModelMixin, RetrieveModelMixin, CreateModelMixin\n\nfrom mood.models import MoodRecord\nfrom mood.serializers import (\n MoodRecordDetailSerializer,\n MoodRecordMiniSerializer,\n MoodRecordMonthSerializer,\n MoodRecordDaySerializer\n)\nfrom mood.constants import MoodType\n\nfrom backend.permissions import IsAuthenticated\n\n\nclass MoodRecordViewSet(\n viewsets.GenericViewSet,\n ListModelMixin,\n RetrieveModelMixin,\n UpdateModelMixin,\n CreateModelMixin\n):\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n return MoodRecord.objects.filter(user=self.request.user).order_by('-id')\n\n def get_serializer_class(self):\n if self.action == 'list':\n return MoodRecordMiniSerializer\n if self.action == 'day':\n return MoodRecordDaySerializer\n if self.action == 'month':\n return MoodRecordMonthSerializer\n return MoodRecordDetailSerializer\n\n def list(self, request, *args, **kwargs):\n objs = self.get_queryset().filter(user=request.user)\n serializer = self.get_serializer(objs, many=True)\n return response.Response(serializer.data)\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n try:\n type = serializer.data.get('type')\n description = serializer.data.get('description')\n year = serializer.data.get('year', None)\n month = serializer.data.get('month', None)\n day = serializer.data.get('day', None)\n\n if year and month and day:\n create_time = timezone.get_default_timezone().localize(datetime(year=year, month=month, day=day))\n record = MoodRecord.objects.create(\n user=request.user,\n type=type,\n description=description,\n )\n record.created_at = create_time\n record.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n record = MoodRecord.objects.create(\n user=request.user,\n type=type,\n description=description,\n )\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except Exception:\n return Response({'detail': '心情记录数据错误'}, status=status.HTTP_400_BAD_REQUEST)\n\n @action(detail=False, methods=['GET'])\n def day(self, request):\n serializer = self.get_serializer(data=request.GET)\n if not serializer.is_valid():\n error = '表单填写错误'\n else:\n year = serializer.validated_data['year']\n month = serializer.validated_data['month']\n day = serializer.validated_data['day']\n\n # fetch the latest mood record that day\n mood_record = self.get_queryset().filter(user=request.user).filter(\n created_at__year=year,\n created_at__month=month,\n created_at__day=day\n ).exclude(type=MoodType.GRATITUDE).first()\n mood_data = MoodRecordDetailSerializer(mood_record).data\n return Response(data=mood_data, status=status.HTTP_200_OK)\n return Response(data={'detail': error}, status=status.HTTP_400_BAD_REQUEST)\n\n @action(detail=False, methods=['GET'])\n def month(self, request):\n serializer = self.get_serializer(data=request.GET)\n if not serializer.is_valid():\n error = '表单填写错误'\n else:\n year = serializer.validated_data['year']\n month = serializer.validated_data['month']\n\n # get all mood records in a month\n mood_records = self.get_queryset().filter(user=request.user).filter(\n created_at__year=year,\n created_at__month=month,\n 
).exclude(type=MoodType.GRATITUDE)\n\n # only reserve the latest record of each day\n mood_values = mood_records.values('id', 'description', 'type', 'created_at')\n tz = timezone.get_current_timezone()\n\n for i in range(len(mood_values)):\n mood_values[i]['created_at'] = tz.normalize(mood_values[i]['created_at'])\n\n # Remove duplicate: only keep the mood record with largest ID on that day\n day_set = set()\n for i in range(len(mood_values)):\n if mood_values[i]['created_at'].day in day_set:\n mood_records = mood_records.exclude(id=mood_values[i]['id'])\n else:\n day_set.add(mood_values[i]['created_at'].day)\n\n # get all gratitude journals in a month\n gratitude_journals = MoodRecord.objects.filter(user=request.user).filter(\n created_at__year=year,\n created_at__month=month,\n type=MoodType.GRATITUDE\n ).order_by('-created_at')\n\n mood_data = MoodRecordMiniSerializer(mood_records, many=True).data\n gratitude_data = MoodRecordDetailSerializer(gratitude_journals, many=True).data\n return Response(data={\n 'moodList': mood_data,\n 'gratitudeList': gratitude_data\n }, status=status.HTTP_200_OK)\n return Response(data={'detail': error}, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"backend/mood/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"104955188","text":"from django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\n\nfrom family.models import Family\n\n\ndef validate_user_exists(value):\n try:\n User.objects.get(username=value)\n except ObjectDoesNotExist:\n raise ValidationError(f'No such user in the database. Please, check for spelling mistakes.')\n\n\ndef validate_family_exists(value):\n try:\n Family.objects.get(name=value)\n except ObjectDoesNotExist:\n raise ValidationError(f'No such family in the database. Please, check for spelling mistakes.')\n","sub_path":"family/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"88132788","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 4 18:31:56 2020\r\n\r\n@author: bijuangalees\r\n\"\"\"\r\n\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\nG=nx.Graph()\r\nl=[1,2,3]\r\nG.add_nodes_from(l)\r\nG.add_edge(1,2)\r\nG.add_edge(2,3)\r\nG.add_edge(1,3)\r\nprint(G.nodes)\r\nprint(G.edges)\r\nnx.draw(G)\r\nplt.show()","sub_path":"networkx_n_nodes.py","file_name":"networkx_n_nodes.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"613763213","text":"import tkinter as tk\nfrom activation import activation\nfrom officescan import officescan\nfrom infi.systray import SysTrayIcon\nimport time\n\nclass MainUI:\n def __init__(self, activation, officesan, connection, install, activation_result, officescan_result,window):\n self.activation=activation\n self.officescan=officesan\n self.connection=connection\n self.install=install\n self.activation_result=activation_result\n self.officescan_result=officescan_result\n self.window=window\n\n\n #show the result\n # check_activation_result=\"The computer is not activated, it has tried to activate itself.\"\n # check_officescan_result=\"The computer virus pattern cannot be compared with the server because the officescan is not installed.\"\n def showUI(self):\n \n #title\n self.window.title(\"The results of checking activation and officescan\")\n #show title\n frame0 = tk.Frame(self.window) \n frame0.grid(row=0, column=0,columnspan=4) \n tk.Label(frame0, text=\"The results of checking activation and officescan\"+\"\\n\",font=('Arial', 13)).pack() \n #show activation_result\n if self.activation==1:\n activation_result=[\"OK\",\"green\"]\n else:\n activation_result=[\"Fail\",\"yellow\"]\n\n frame1 = tk.Frame(self.window) \n frame1.grid(row=1, column=0, sticky='w') \n tk.Label(frame1, text=\"Activation Check Result:\",width=25).pack(side='left') \n tk.Label(frame1, text=activation_result[0],bg=activation_result[1],width=8).pack(side='left')\n #show officescan_result\n if self.officescan==1:\n officescan_result=[\"OK\",\"green\"]\n else:\n officescan_result=[\"Fail\",\"yellow\"]\n\n frame2 = tk.Frame(self.window) \n frame2.grid(row=1, column=1, sticky='w') \n tk.Label(frame2, text=\"OfficeScan Check Result:\",width=25).pack(side='left')\n tk.Label(frame2, text=officescan_result[0],bg=officescan_result[1],width=8).pack(side='left') \n #show network_result\n if self.connection==1:\n network_result=[\"OK\",\"green\"]\n else:\n network_result=[\"Fail\",\"yellow\"]\n\n frame3 = tk.Frame(self.window) \n frame3.grid(row=2, column=0, sticky='w') \n tk.Label(frame3, text=\"Network Connection Status:\",width=25).pack(side='left') \n tk.Label(frame3, text=network_result[0],bg=network_result[1],width=8).pack(side='left') \n #show install_result\n if self.install==1:\n install_result=[\"OK\",\"green\"]\n else:\n install_result=[\"Fail\",\"yellow\"]\n\n frame4 = tk.Frame(self.window) \n frame4.grid(row=2, column=1, sticky='w') \n tk.Label(frame4, text=\"OfficeScan Install Status:\",width=25).pack(side='left')\n tk.Label(frame4, text=install_result[0],bg=install_result[1],width=8).pack(side='left')\n #show remark\n frame5 = tk.Frame(self.window) \n frame5.grid(row=3, column=0,columnspan=4,sticky='w') \n tk.Label(frame5, text=\"\\n\").pack(side='left')\n\n frame6 = tk.Frame(self.window) \n frame6.grid(row=4, column=0,columnspan=4, sticky='w') \n tk.Label(frame6, text=\"Remark:\").pack(side='left')\n\n frame7 = tk.Frame(self.window) \n frame7.grid(row=5, column=0,columnspan=4, sticky='w') \n tk.Label(frame7, text=self.activation_result).pack(side='left')\n\n frame8 = tk.Frame(self.window) \n frame8.grid(row=6, column=0,columnspan=4, sticky='w') \n tk.Label(frame8, text=self.officescan_result).pack(side='left')\n \n\nif __name__ == \"__main__\":\n \n #check activation\n os_activation=activation()\n os_activation.close_dialog_box()\n os_activation.is_activated()\n if os_activation.activation==1:\n check_activation_result=\"The computer has been activated\"\n else:\n 
os_activation.activate()\n check_activation_result=\"The computer is not activated, it has tried to activate itself.\"\n \n #check officescan\n check_officescan=officescan()\n server_ptn=check_officescan.get_officescan_server_ptn()\n client_ptn=check_officescan.get_officescan_client_ptn()\n if server_ptn==client_ptn or server_ptn==client_ptn+200 or server_ptn==client_ptn+400 or server_ptn+200==client_ptn or server_ptn+400==client_ptn:\n check_officescan.virus_pattern_identical=1\n check_officescan_result=\"The computer virus pattern is similar to the server.\"\n elif check_officescan.network_connection==0:\n check_officescan_result=\"The computer virus pattern cannot be compared with the server because the network cannot connect.\"\n elif check_officescan.install_officescan==0:\n check_officescan_result=\"The computer virus pattern cannot be compared with the server because the officescan is not installed.\"\n else:\n check_officescan_result=\"The computer virus pattern is not similar to the server.\"\n\n #show_UI\n def show(systray):\n window=tk.Tk()\n UI = MainUI(os_activation.activation,check_officescan.virus_pattern_identical,check_officescan.network_connection,check_officescan.install_officescan,check_activation_result,check_officescan_result,window)\n UI.showUI()\n window.mainloop()\n\n if os_activation.activation==1 and check_officescan.virus_pattern_identical==1:\n pass\n else: \n menu_options = ((\"Check Computer\", None, show),)\n systray = SysTrayIcon(\"icon.ico\", \"Check Computer\", menu_options)\n systray.start()","sub_path":"CheckComputer/CheckComputer 2.1/CheckComputer_2.1.1.py","file_name":"CheckComputer_2.1.1.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"645270300","text":"import rosbag\nimport rospy\nimport tf\nimport geometry_msgs\n\n\ndef bag_type_to_geometry_msgs(msg_tf):\n casted_msg = geometry_msgs.msg.TransformStamped()\n casted_msg.header = msg_tf.header\n casted_msg.child_frame_id = msg_tf.child_frame_id\n casted_msg.transform.translation.x = msg_tf.transform.translation.x\n casted_msg.transform.translation.y = msg_tf.transform.translation.y\n casted_msg.transform.translation.z = msg_tf.transform.translation.z\n casted_msg.transform.rotation.x = msg_tf.transform.rotation.x\n casted_msg.transform.rotation.y = msg_tf.transform.rotation.y\n casted_msg.transform.rotation.z = msg_tf.transform.rotation.z\n casted_msg.transform.rotation.w = msg_tf.transform.rotation.w\n return casted_msg\n\n\ndef fill_transformer(bag):\n print(\"Loading tfs into transformer...\")\n tf_t = tf.Transformer(True, rospy.Duration(3600))\n for topic, msg, t in bag.read_messages(topics=[\"/tf\"]):\n for msg_tf in msg.transforms:\n casted_msg = bag_type_to_geometry_msgs(msg_tf)\n tf_t.setTransform(casted_msg)\n print(\"Finished\")\n return tf_t\n\n\ndef main():\n # bag = rosbag.Bag(\"/home/satco/PycharmProjects/PoseCNN/bag/dataset_one_box.bag\")\n bag = rosbag.Bag(\"/home/satco/PycharmProjects/PoseCNN/bag/test.bag\")\n # topics = [\"/camera1/color/image_raw\", \"/camera2/color/image_raw\"]\n topics = [\"/camera/color/image_raw\"]\n tf_t = fill_transformer(bag)\n (trans, rot) = tf_t.lookupTransform(\"vicon\", \"box11\", rospy.Time(1537799697, 297481))\n\n with open(\"data/box_positions.txt\", \"w\") as f:\n f.write(str(trans + rot) + \"\\n\")\n\n counter = 1\n f = open(\"data/camera1_positions.txt\", \"w\")\n for topic, msg, t in bag.read_messages(topics=topics, start_time=rospy.Time(1537799716, 30952)):\n if topic == \"/camera/color/image_raw\":\n try:\n (trans, rot) = tf_t.lookupTransform(\"vicon\", \"camera\", msg.header.stamp)\n except tf.ExtrapolationException:\n print(\"Skipped \" + str(counter) + \" lookups\")\n counter += 1\n f.write(str(trans + rot) + \"\\n\")\n f.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"generate_dataset/export_tf.py","file_name":"export_tf.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"92397480","text":"from app.models import Category\nimport random\nimport datetime\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\n@api_view(['POST'])\ndef create_category(request):\n \"\"\"\n {\n name:string,\n description:string,\n\n }\n \"\"\"\n try:\n category= Category(\n name=request.data['name'],\n description=request.data['description'],\n created_at=datetime.date.today(),\n updated_at=datetime.date.today()\n )\n\n category.save()\n success = {\n 'data':[],\n 'message':'success',\n 'status_code':200\n }\n\n return Response(success) \n\n except:\n success = {\n 'data':{\n 'name':request.data['name'],\n 'description':request.data['description']\n },\n 'message':'error',\n 'status_code':500\n }\n return Response(success) \n\n\n\n@api_view(['GET'])\ndef get_all_categories(request):\n \"\"\"\n \n \"\"\"\n events = Category.objects.all()\n data=[]\n for event in events:\n values={\n 'id':event.id,\n 'title':event.name,\n }\n\n data.append(values)\n\n success ={\n 'data':data,\n 'message':'success',\n 'status_code':200\n } \n \n return Response(success) \n\n\n\n@api_view(['GET'])\ndef sample_test(request):\n data=[]\n data.append({\"distance\":20})\n return Response(data) ","sub_path":"app/category/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"94027537","text":"from rest_framework import serializers\nfrom .models import user,country,organization,contest ,user_contest_rank\nfrom .models import organization_contest_participation, country_contest_participation\n\nclass CountrySerializer(serializers.ModelSerializer):\n class Meta:\n model = country\n fields = [\n 'name',\n 'total'\n ]\n\nclass OrganizationSerializer(serializers.ModelSerializer):\n class Meta:\n model = organization\n fields = [\n 'name',\n 'total'\n ]\n\nclass ContestSerializer(serializers.ModelSerializer):\n Type = serializers.CharField(source='get_Type_display') \n class Meta:\n model = contest\n fields = [\n 'name',\n 'contestId',\n 'duration',\n 'startTime',\n 'participants',\n 'Type'\n ]\n\nclass contestRankSerializer(serializers.ModelSerializer):\n class Meta:\n model = user_contest_rank\n fields = [\n 'worldRank',\n 'countryRank',\n 'organizationRank'\n ]\n\nclass UserSerializer(serializers.ModelSerializer):\n country = CountrySerializer()\n organization = OrganizationSerializer()\n contestRank = contestRankSerializer(many=True)\n class Meta:\n model= user\n fields = [\n 'name',\n 'handle',\n 'rating',\n 'maxRating',\n 'rank',\n 'maxRank',\n 'worldRank',\n 'countryRank',\n 'organizationRank',\n 'country',\n 'organization',\n 'photoUrl',\n 'contestRank'\n ]","sub_path":"codedigger/codeforces/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"356806237","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@version: ??\n@author: thsheep\n@file: token.py\n@time: 2018/5/3 11:05\n@site: 获取access_token\n\"\"\"\n# ┏┓ ┏┓\n# ┏┛┻━━━┛┻┓\n# ┃ ┃\n# ┃ ━ ┃\n# ┃ ┳┛ ┗┳ ┃\n# ┃ ┃\n# ┃ ┻ ┃\n# ┃ ┃\n# ┗━┓ ┏━┛Codes are far away from bugs with the animal protecting\n# ┃ ┃ 神兽保佑,代码无bug\n# ┃ ┃\n# ┃ ┗━━━┓\n# ┃ ┣┓\n# ┃ ┏┛\n# ┗┓┓┏━┳┓┏┛\n# ┃┫┫ ┃┫┫\n# ┗┻┛ ┗┻┛\nimport json\nimport redis\nimport requests\n\nfrom urllib.parse import urlencode\n\nfrom common.error import GetKeyError\n\n\nclass Token(object):\n\n def __init__(self, config):\n \"\"\"初始化配置文件\n :param config:\n \"\"\"\n if not isinstance(config, dict):\n raise TypeError(f\"{config} 必须是字典\")\n\n redis_config = config.get('redis')\n\n if not isinstance(redis_config, dict):\n raise TypeError(f\"{redis_config} 必须是字典\")\n\n self.token_config = config.get('token_config')\n\n redis_config.update({'decode_responses': True})\n self.redis_conn = redis.StrictRedis(**redis_config)\n\n def generate_access_token(self, app_name):\n \"\"\"获取access_token\n :param corpid:企业ID\n :param corpsecret:应用凭证秘钥\n :return:\n \"\"\"\n token_config = self.token_config.get(app_name)\n query = urlencode(token_config)\n access_token_url = f\"https://qyapi.weixin.qq.com/cgi-bin/gettoken?{query}\"\n response = requests.get(access_token_url).json()\n if response.get('errcode') == 0:\n access_token = response.get('access_token')\n expires_in = response.get('expires_in')\n return access_token, expires_in\n return False, False\n\n def get_access_token(self, app_name):\n \"\"\"\n :param app_name: 需要获取token的应用名称\n :return:\n \"\"\"\n redis_key= f\"token:{app_name}\"\n token = self.redis_conn.get(redis_key)\n if token:\n return token\n token, expires_in = self.generate_access_token(app_name)\n if token:\n self.redis_conn.setex(redis_key, expires_in, token)\n return self.redis_conn.get(redis_key)\n\n","sub_path":"common/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"352562245","text":"import functools\nfrom flask import (\n Blueprint,\n flash,\n render_template,\n make_response,\n request,\n g,\n url_for,\n redirect,\n)\nfrom werkzeug.exceptions import BadRequest\nfrom frontend.forms.auth import (\n SignupForm,\n LoginForm,\n)\nfrom frontend.helpers.auth import (\n create_user,\n get_jwt,\n refresh_jwt,\n is_jwt_expired,\n is_jwt_about_to_expire,\n get_user_from_jwt,\n)\nfrom frontend.helpers.utils import set_form_errors\nfrom frontend import get_backend_api\n\nauth = Blueprint('auth', __name__, url_prefix='/auth')\n\n\n@auth.route('/signup', methods=['GET', 'POST'])\ndef signup():\n form = SignupForm()\n if form.validate_on_submit():\n try:\n create_user(\n email=form.email.data,\n first_name=form.first_name.data,\n last_name=form.last_name.data,\n password=form.password.data,\n )\n flash(\"Thanks for signing up!\")\n return redirect(url_for('auth.login'))\n except BadRequest as e:\n if isinstance(e.description, str):\n form.non_field_errors = [e.description]\n else:\n errors = e.description\n set_form_errors(form=form, errors=errors)\n return render_template('auth/signup.html', form=form)\n\n\n@auth.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n try:\n g.jwt = get_jwt(email=form.email.data, password=form.password.data)\n return redirect(url_for('todo.list_tasks'))\n except BadRequest as e:\n if isinstance(e.description, str):\n form.non_field_errors = [e.description]\n else:\n errors = e.description\n set_form_errors(form=form, errors=errors)\n return render_template('auth/login.html', form=form)\n\n\n@auth.route('/logout', methods=['GET'])\ndef logout():\n g.jwt = ''\n return make_response(render_template('auth/logout.html'))\n\n\n@auth.before_app_request\ndef load_jwt():\n token = request.cookies.get('jwt', '')\n session = get_backend_api()\n auth_header = f'Bearer {token}' if token else ''\n session.headers.update({'Authorization': auth_header})\n g.user = get_user_from_jwt(token) if token else None\n g.jwt = token\n\n\n@auth.after_app_request\ndef refresh_jwt_if_about_to_expire(response):\n token = g.jwt\n if token:\n if is_jwt_expired(token):\n flash(\"You have been logged out due to inactivity.\")\n token = ''\n elif is_jwt_about_to_expire(token):\n try:\n token = refresh_jwt(token)\n except BadRequest:\n flash(\"You have been logged out due to inactivity.\")\n token = ''\n response.set_cookie('jwt', token)\n return response\n\n\ndef login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login'))\n return view(**kwargs)\n\n return wrapped_view\n","sub_path":"frontend/views/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"624450849","text":"'''\\\r\nAn asynchronous task-queue built on top :class:`pulsar.Application` framework.\r\nBy creating :class:`Job` classes in a similar way you can do for celery_,\r\nthis application gives you all you need for running them with very\r\nlittle setup effort::\r\n\r\n from pulsar.apps import tasks\r\n\r\n tq = tasks.TaskQueue(tasks_path='path.to.tasks.*')\r\n tq.start()\r\n\r\n.. _tasks-actions:\r\n\r\nTutorial\r\n==============\r\n\r\nActions\r\n~~~~~~~~~~~~~~~\r\n\r\nThe :class:`Taskqueue` application adds the following\r\n:ref:`remote actions ` to its workers:\r\n\r\n* **addtask** to add a new task to the task queue::\r\n\r\n send(taskqueue, 'addtask', jobname, task_extra, *args, **kwargs)\r\n\r\n * *jobname*: the name of the :class:`Job` to run.\r\n * *task_extra*: dictionary of extra parameters to pass to the :class:`Task`\r\n constructor. Usually a empty dictionary.\r\n * *args*: positional arguments for the :ref:`job callable `.\r\n * *kwargs*: key-valued arguments for the :ref:`job callable `.\r\n\r\n* **addtask_noack** same as **addtask** but without acknowleding the sender::\r\n\r\n send(taskqueue, 'addtask_noack', jobname, task_extra, *args, **kwargs)\r\n \r\n* **get_task** retrieve task information. This can be already executed or not.\r\n The implementation is left to the :meth:`Task.get_task` method::\r\n \r\n send(taskqueue, 'get_task', id)\r\n \r\n* **get_tasks** retrieve information for tasks which satisfy the filtering.\r\n The implementation is left to the :meth:`Task.get_tasks` method::\r\n \r\n send(taskqueue, 'get_tasks', **filters)\r\n \r\n \r\nJobs\r\n~~~~~~~~~~~~~~~~\r\n\r\nAn application implements several :class:`Job`\r\nclasses which specify the way each :class:`Task` is run.\r\nEach job class is a task-factory, therefore, a task is always associated\r\nwith one job, which can be of two types:\r\n\r\n* standard (:class:`Job`)\r\n* periodic (:class:`PeriodicJob`)\r\n\r\n.. _job-callable:\r\n\r\nTo define a job is simple, subclass from :class:`Job` and implement the\r\n**job callable method**::\r\n\r\n from pulsar.apps import tasks\r\n\r\n class Addition(tasks.Job):\r\n\r\n def __call__(self, consumer, a, b):\r\n \"Add two numbers\"\r\n return a+b\r\n \r\n class Sampler(tasks.Job):\r\n\r\n def __call__(self, consumer, sample, size=10):\r\n ...\r\n\r\nThe *consumer*, instance of :class:`TaskConsumer`, is passed by the\r\n:class:`TaskQueue` and should always be the first positional argument in the\r\ncallable function.\r\nThe remaining positional arguments and/or key-valued parameters are needed by\r\nyour job implementation.\r\n\r\nTask Class\r\n~~~~~~~~~~~~~~~~~\r\n\r\nBy default, tasks are constructed using an in-memory implementation of\r\n:class:`Task`. To use a different implementation, for example one that\r\nsaves tasks on a database, subclass :class:`Task` and pass the new class\r\nto the :class:`TaskQueue` constructor::\r\n\r\n from pulsar.apps import tasks\r\n\r\n class TaskDatabase(tasks.Task):\r\n\r\n def on_created(self):\r\n return save2db(self)\r\n\r\n def on_received(self):\r\n return save2db(self)\r\n\r\n def on_start(self):\r\n return save2db(self)\r\n\r\n def on_finish(self):\r\n return save2db(self)\r\n\r\n @classmethod\r\n def get_task(cls, id, remove = False):\r\n return taskfromdb(id)\r\n\r\n\r\n tq = tasks.TaskQueue(task_class=TaskDatabase, tasks_path='path.to.tasks.*')\r\n tq.start()\r\n\r\n\r\n.. 
_tasks-callbacks:\r\n\r\nTask callbacks\r\n~~~~~~~~~~~~~~~~~~~\r\n\r\nWhen creating your own :class:`Task` class all you need to override are the four\r\ntask callbacks:\r\n\r\n* :meth:`Task.on_created` called by the taskqueue when it creates a new task\r\n instance.\r\n* :meth:`Task.on_received` called by a worker when it receives the task.\r\n* :meth:`Task.on_start` called by a worker when it starts the task.\r\n* :meth:`Task.on_finish` called by a worker when it ends the task.\r\n\r\n\r\nand :meth:`Task.get_task` classmethod for retrieving tasks instances.\r\n\r\n.. _task-state:\r\n\r\nTask states\r\n~~~~~~~~~~~~~\r\n\r\nA :class:`Task` can have one of the following :attr:`Task.status` string:\r\n\r\n* ``PENDING`` A task waiting for execution and unknown.\r\n* ``RETRY`` A task is retrying calculation.\r\n* ``RECEIVED`` when the task is received by the task queue.\r\n* ``STARTED`` task execution has started.\r\n* ``REVOKED`` the task execution has been revoked. One possible reason could be\r\n the task has timed out.\r\n* ``UNKNOWN`` task execution is unknown.\r\n* ``FAILURE`` task execution has finished with failure.\r\n* ``SUCCESS`` task execution has finished with success.\r\n\r\n\r\n.. attribute:: FULL_RUN_STATES\r\n\r\n The set of states for which a :class:`Task` has run:\r\n ``FAILURE`` and ``SUCCESS``\r\n\r\n.. attribute:: READY_STATES\r\n\r\n The set of states for which a :class:`Task` has finished:\r\n ``REVOKED``, ``FAILURE`` and ``SUCCESS``\r\n\r\n\r\nQueue\r\n~~~~~~~~~~~~~~\r\n\r\nBy default the queue is implemented using the multiprocessing.Queue\r\nfrom the standard python library. To specify a different queue you can\r\nuse the ``task-queue`` flag from the command line::\r\n\r\n python myserverscript.py --task-queue dotted.path.to.callable\r\n\r\nor by setting the ``task_queue_factory`` parameter in the config file\r\nor in the :class:`TaskQueue` constructor.\r\n\r\n\r\n.. 
_celery: http://celeryproject.org/\r\n'''\r\nimport os\r\nfrom datetime import datetime\r\n\r\nimport pulsar\r\nfrom pulsar import to_string, safe_async\r\nfrom pulsar.utils.importer import module_attribute\r\n\r\nfrom .exceptions import *\r\nfrom .task import *\r\nfrom .models import *\r\nfrom .scheduler import Scheduler\r\nfrom .states import *\r\nfrom .rpc import *\r\n\r\n\r\nclass TaskQueueFactory(pulsar.Setting):\r\n app = 'cpubound'\r\n name = \"task_queue_factory\"\r\n section = \"Task Consumer\"\r\n flags = [\"-q\", \"--task-queue\"]\r\n default = \"pulsar.Queue\"\r\n desc = \"\"\"The task queue factory to use.\"\"\"\r\n\r\n def get(self):\r\n return module_attribute(self.value)\r\n\r\n\r\nclass TaskSetting(pulsar.Setting):\r\n virtual = True\r\n app = 'tasks'\r\n\r\n\r\nclass TaskPath(TaskSetting):\r\n name = \"tasks_path\"\r\n section = \"Task Consumer\"\r\n meta = \"STRING\"\r\n validator = pulsar.validate_list\r\n cli = [\"--tasks-path\"]\r\n default = ['pulsar.apps.tasks.testing']\r\n desc = \"\"\"\\\r\n List of python dotted paths where tasks are located.\r\n \"\"\"\r\n\r\n\r\nclass CPUboundServer(pulsar.Application):\r\n '''A CPU-bound application server.'''\r\n _app_name = 'cpubound'\r\n\r\n def get_ioqueue(self):\r\n '''Return the distributed task queue which produces tasks to\r\nbe consumed by the workers.'''\r\n if self.local.ioqueue is None:\r\n self.local.ioqueue = self.cfg.task_queue_factory()\r\n return self.local.ioqueue\r\n\r\n def request_instance(self, worker, fd, request):\r\n return request\r\n\r\n def on_event(self, worker, fd, request):\r\n request = self.request_instance(worker, fd, request)\r\n if request is not None:\r\n c = self.local.current_requests\r\n if c is None:\r\n c = []\r\n self.local.current_requests = c\r\n c.append(request)\r\n yield safe_async(request.start, args=(worker,))\r\n try:\r\n c.remove(request)\r\n except ValueError:\r\n pass\r\n\r\n################################################# TASKQUEUE COMMANDS\r\ntaskqueue_cmnds = set()\r\n\r\n@pulsar.command(internal=True, commands_set=taskqueue_cmnds)\r\ndef addtask(client, actor, caller, jobname, task_extra, *args, **kwargs):\r\n kwargs.pop('ack', None)\r\n return actor.app._addtask(actor, caller, jobname, task_extra, True,\r\n args, kwargs)\r\n\r\n@pulsar.command(internal=True, ack=False, commands_set=taskqueue_cmnds)\r\ndef addtask_noack(client, actor, caller, jobname, task_extra, *args, **kwargs):\r\n kwargs.pop('ack', None)\r\n return actor.app._addtask(actor, caller, jobname, task_extra, False,\r\n args, kwargs)\r\n\r\n@pulsar.command(internal=True, commands_set=taskqueue_cmnds)\r\ndef save_task(client, actor, caller, task):\r\n #import time\r\n #time.sleep(0.1)\r\n return actor.app.scheduler.save_task(task)\r\n\r\n@pulsar.command(internal=True, commands_set=taskqueue_cmnds)\r\ndef delete_tasks(client, actor, caller, ids):\r\n return actor.app.scheduler.delete_tasks(ids)\r\n\r\n@pulsar.command(commands_set=taskqueue_cmnds)\r\ndef get_task(client, actor, id):\r\n return actor.app.scheduler.get_task(id)\r\n\r\n@pulsar.command(commands_set=taskqueue_cmnds)\r\ndef get_tasks(client, actor, **parameters):\r\n return actor.app.scheduler.get_tasks(**parameters)\r\n\r\n@pulsar.command(commands_set=taskqueue_cmnds)\r\ndef job_list(client, actor, jobnames=None):\r\n return list(actor.app.job_list(jobnames=jobnames))\r\n\r\n@pulsar.command(commands_set=taskqueue_cmnds)\r\ndef next_scheduled(client, actor, jobnames=None):\r\n return 
actor.app.scheduler.next_scheduled(jobnames=jobnames)\r\n\r\n@pulsar.command(commands_set=taskqueue_cmnds)\r\ndef wait_for_task(client, actor, id, timeout=3600):\r\n # wait for a task to finish for at most timeout seconds\r\n scheduler = actor.app.scheduler\r\n return scheduler.task_class.wait_for_task(scheduler, id, timeout)\r\n\r\n\r\nclass TaskQueue(CPUboundServer):\r\n '''A :class:`pulsar.CPUboundServer` for consuming\r\ntasks and managing their scheduling.\r\n\r\n.. attribute:: registry\r\n\r\n Instance of a :class:`JobRegistry` containing all\r\n registered :class:`Job` instances.\r\n'''\r\n _app_name = 'tasks'\r\n cfg_apps = ('cpubound',)\r\n cfg = {'timeout': '3600', 'backlog': 1}\r\n commands_set = taskqueue_cmnds\r\n task_class = TaskInMemory\r\n '''The :class:`Task` class for storing information about task execution.\r\n\r\nDefault: :class:`TaskInMemory`\r\n'''\r\n scheduler_class = Scheduler\r\n '''The scheduler class. Default: :class:`Scheduler`.'''\r\n\r\n @property\r\n def scheduler(self):\r\n '''A :class:`Scheduler` which sends tasks to the task queue and\r\nproduces periodic tasks according to their schedule of execution.\r\n\r\nAt every event loop, the :class:`pulsar.ApplicationMonitor` running\r\nthe :class:`TaskQueue` application invokes the :meth:`Scheduler.tick` method,\r\nwhich checks for tasks to be scheduled.\r\n\r\nCheck the :meth:`TaskQueue.monitor_task` callback\r\nfor the implementation.'''\r\n return self.local.scheduler\r\n\r\n def request_instance(self, worker, fd, request):\r\n return self.scheduler.get_task(request)\r\n\r\n def monitor_task(self, monitor):\r\n '''Override the :meth:`pulsar.Application.monitor_task` callback\r\nto check if the scheduler needs to perform a new run.'''\r\n s = self.scheduler\r\n if s:\r\n if s.next_run <= datetime.now():\r\n s.tick()\r\n\r\n def handler(self):\r\n # Load the application callable, the task consumer\r\n if self.callable:\r\n self.callable()\r\n self.local.scheduler = Scheduler(self.get_ioqueue(),\r\n self.task_class,\r\n self.cfg.tasks_path,\r\n logger=self.logger,\r\n schedule_periodic=True)\r\n return self\r\n\r\n def monitor_handler(self):\r\n return self.handler()\r\n\r\n def job_list(self, jobnames=None):\r\n return self.scheduler.job_list(jobnames=jobnames)\r\n\r\n @property\r\n def registry(self):\r\n global registry\r\n return registry\r\n\r\n # Internals\r\n def _addtask(self, monitor, caller, jobname, task_extra, ack, args, kwargs):\r\n task = self.scheduler.queue_task(jobname, args, kwargs, **task_extra)\r\n if ack:\r\n return task\r\n","sub_path":"pulsar/apps/tasks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
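The `addtask` protocol documented in this record can be exercised from another actor; a sketch following the docstring's own conventions, where the `'taskqueue'` monitor name and the lowercased job name (`'addition'`) are assumptions about pulsar's default job registration:

```python
# Illustrative only: assumes a running TaskQueue named 'taskqueue' and
# pulsar's default job naming (lowercased class name -> 'addition').
from pulsar import send
from pulsar.apps import tasks


class Addition(tasks.Job):

    def __call__(self, consumer, a, b):
        "Add two numbers"
        return a + b


def queue_addition(a, b):
    # 'addtask' acknowledges with the created task; the empty dict is the
    # task_extra mapping passed to the Task constructor.
    return send('taskqueue', 'addtask', 'addition', {}, a, b)
```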
+{"seq_id":"559541564","text":"from data_science_layer.reporting.abstract_report import AbstractReport\nfrom data_science_layer.pipeline.abstract_pipline import AbstractPipeline\nfrom data_science_layer.machine_learning.extra_trees_classifier import ExtraTreesClassifierModel\nimport pandas as pd\nimport numpy as np\nimport pkg_resources\n\n\nclass ExtraTreesClassifierFeatureWeightsReport(AbstractReport):\n\n sub_folder = 'reports'\n\n def report(self, pipeline: AbstractPipeline):\n x = pipeline.train\n y = pipeline.train_y\n\n x['RANDOM_NUMBER'] = np.random.normal(0, 1, x.shape[0])\n\n et = ExtraTreesClassifierModel()\n et.n_estimators = 100\n et.random_state = pipeline.random_seed\n et.search_models(x, y)\n\n ft = pd.DataFrame([x.columns.values, et.best_model.feature_importances_],\n index=['Feature', 'Weight']).transpose().sort_values(by=['Weight'], ascending=False)\n\n x.drop('RANDOM_NUMBER', axis=1, inplace=True)\n\n report_df = ft\n folder = ''\n path = pkg_resources.resource_filename('crcdal', 'cache/' + folder + '/' + self.sub_folder +'/')\n pkg_resources.ensure_directory(path)\n report_df.to_csv(path + pipeline.dataset_tag + '_extra_trees_weights_report.csv')\n","sub_path":"data_science_layer/reporting/extra_trees_classifier_feature_weights.py","file_name":"extra_trees_classifier_feature_weights.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"464876859","text":"import operator\n\nimport mixins\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView\nfrom django.views.generic.edit import FormMixin\nfrom django.views.generic.list import ListView\nfrom networking import forms\nfrom networking import models\nfrom networking.mailer import send_email\nfrom omnis.cms.mixins import ApplicationContextMixin\nfrom profiles.models import UserProfile\n\n\nclass MemberSearchView(\n mixins.AuthMixin,\n ApplicationContextMixin,\n FormMixin,\n ListView\n):\n \"\"\" \"Find a buddy\" search page \"\"\"\n template_name = 'networking/member_search.html'\n model = UserProfile\n form_class = forms.MemberSearchForm\n paginate_by = settings.PAGINATE_BY\n allow_empty = True\n object_list = []\n\n def get_queryset(self):\n \"\"\"\n Gets only active members who are visible on the network\n\n :return: Queryset\n \"\"\"\n return (self.model\n .objects.filter_network_visible()\n .filter(user__is_active=True))\n\n def search(self, queryset, form):\n \"\"\"\n Search filtering method for queryset.\n\n :param queryset: UserProfile model queryset\n :param form: MemberSearchForm instance\n :return: Queryset without filtered items\n \"\"\"\n\n if not form.is_valid():\n return queryset\n\n name = form.cleaned_data.get('name')\n work_interest = form.cleaned_data.get('work_interest')\n sector = form.cleaned_data.get('sector')\n\n names = name.split(' ')\n has_multiple_names = len(names) > 1\n\n if name:\n if has_multiple_names:\n # If there are multiple words - each word will be searched\n # against first name and last name columns\n\n def _queries(item):\n return Q(user__first_name__contains=item) | Q(\n user__last_name__icontains=item)\n\n query_list = [_queries(n) for n in names]\n query = reduce(operator.or_, query_list)\n queryset = queryset.filter(query)\n else:\n queryset = queryset.filter(\n Q(user__first_name__icontains=name) | Q(\n user__last_name__icontains=name)\n )\n\n if work_interest:\n queryset = queryset.filter(work_interest=work_interest)\n\n if sector:\n queryset = queryset.filter(sector=sector)\n\n return queryset\n\n # def get_context_object_name(self, object_list):\n # return 'object_list'\n\n def get_form_kwargs(self):\n \"\"\"\n Returns the keyword arguments for instantiating the form.\n \"\"\"\n kwargs = {\n 'initial': self.get_initial()\n }\n\n if self.request.GET:\n kwargs.update({\n 'data': self.request.GET,\n })\n\n return kwargs\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n GET request handler.\n Puts both a form and object list into the context\n\n :param request: django request instance\n :param args: URL parameters\n :param kwargs: URL parameters\n :return: HTTP response\n \"\"\"\n form = self.get_form(self.get_form_class())\n return self.render_to_response(\n self.get_context_data(\n object_list=self.search(self.get_queryset(), form),\n form=form\n )\n )\n\n\nclass MemberProfileView(\n mixins.AuthMixin,\n ApplicationContextMixin,\n DetailView\n):\n template_name = 'networking/member_profile.html'\n model = UserProfile\n slug_field = 'user__username'\n\n def get_queryset(self):\n return self.model.objects.filter_active()\n\n\nclass MessageMemberView(\n mixins.AuthMixin,\n ApplicationContextMixin,\n CreateView\n):\n \"\"\" Display message form for authenticated users in 
recipient user\n profile that will allow to send message to the associated user inbox\n \"\"\"\n template_name = 'networking/member_message.html'\n model = models.MemberMessage\n form_class = forms.MemberMessageForm\n member = None\n\n def get_member(self):\n \"\"\"\n Get the member details. Member data is saved in self.member in order\n to perform 1 DB instead of 3\n\n :return: UserProfile instance of the message receiver user\n \"\"\"\n if not self.member:\n self.member = get_object_or_404(\n UserProfile,\n user__username=self.kwargs.get('slug')\n )\n return self.member\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add extra context data to templates\n\n :param kwargs:\n :return: context data + object ()\n \"\"\"\n data = super(MessageMemberView, self).get_context_data(**kwargs)\n data['object'] = self.get_member()\n return data\n\n def form_valid(self, form):\n \"\"\"\n Actions to carry out after valid form has been successfully submitted\n\n :param form: MemberMessageForm instance\n :return: Http redirect to the recipient member absolute URL\n \"\"\"\n recipient = self.get_member().user\n\n message = form.save(commit=False)\n message.sender = self.request.user\n message.recipient = recipient\n message.save()\n\n messages.success(self.request, \"Message sent successfully\")\n\n send_email('emails/network_message', recipient.email, **{\n 'message_obj': message,\n })\n\n return HttpResponseRedirect(self.get_member().get_absolute_url())\n","sub_path":"networking/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
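The multi-word branch of `MemberSearchView.search` builds one `Q` object per word and ORs them together with `reduce`; a standalone sketch of that pattern (field names mirror the view above, and Django must be configured before filtering a real model):

```python
# Standalone sketch of the OR-reduction used in MemberSearchView.search.
import operator
from functools import reduce

from django.db.models import Q


def name_query(name):
    words = name.split()
    per_word = [Q(user__first_name__icontains=w) |
                Q(user__last_name__icontains=w) for w in words]
    # A profile matches when any single word hits either name column.
    return reduce(operator.or_, per_word)

# Usage inside the view: queryset.filter(name_query('ada lovelace'))
```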
+{"seq_id":"490307626","text":"# Copyright 2021, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.core.backends.native import execution_contexts\nfrom tensorflow_federated.python.aggregators import mean\nfrom tensorflow_federated.python.aggregators import sum_factory\nfrom tensorflow_federated.python.common_libs import structure\nfrom tensorflow_federated.python.core.api import computations\nfrom tensorflow_federated.python.core.api import test_case\nfrom tensorflow_federated.python.core.impl.federated_context import intrinsics\nfrom tensorflow_federated.python.core.impl.types import computation_types\nfrom tensorflow_federated.python.core.impl.types import placements\nfrom tensorflow_federated.python.core.templates import measured_process\nfrom tensorflow_federated.python.learning import model_examples\nfrom tensorflow_federated.python.learning import model_utils\nfrom tensorflow_federated.python.learning.optimizers import sgdm\nfrom tensorflow_federated.python.learning.templates import client_works\nfrom tensorflow_federated.python.learning.templates import composers\nfrom tensorflow_federated.python.learning.templates import distributors\nfrom tensorflow_federated.python.learning.templates import finalizers\nfrom tensorflow_federated.python.learning.templates import learning_process\n\nFLOAT_TYPE = computation_types.TensorType(tf.float32)\nMODEL_WEIGHTS_TYPE = computation_types.to_type(\n model_utils.ModelWeights(FLOAT_TYPE, ()))\nCLIENTS_SEQUENCE_FLOAT_TYPE = computation_types.at_clients(\n computation_types.SequenceType(FLOAT_TYPE))\n\n\ndef empty_at_server():\n return intrinsics.federated_value((), placements.SERVER)\n\n\n@computations.federated_computation()\ndef empty_init_fn():\n return empty_at_server()\n\n\n@computations.tf_computation()\ndef test_init_model_weights_fn():\n return model_utils.ModelWeights(trainable=tf.constant(1.0), non_trainable=())\n\n\ndef test_distributor():\n\n @computations.federated_computation(\n empty_init_fn.type_signature.result,\n computation_types.at_server(MODEL_WEIGHTS_TYPE))\n def next_fn(state, value):\n return measured_process.MeasuredProcessOutput(\n state, intrinsics.federated_broadcast(value), empty_at_server())\n\n return distributors.DistributionProcess(empty_init_fn, next_fn)\n\n\ndef test_client_work():\n\n @computations.tf_computation()\n def make_result(value, data):\n return client_works.ClientResult(\n update=value.trainable,\n update_weight=data.reduce(0.0, lambda x, y: x + y))\n\n @computations.federated_computation(\n empty_init_fn.type_signature.result,\n computation_types.at_clients(MODEL_WEIGHTS_TYPE),\n CLIENTS_SEQUENCE_FLOAT_TYPE)\n def next_fn(state, value, client_data):\n result = intrinsics.federated_map(make_result, (value, client_data))\n return measured_process.MeasuredProcessOutput(state, result,\n empty_at_server())\n\n return 
client_works.ClientWorkProcess(empty_init_fn, next_fn)\n\n\ndef test_aggregator():\n return mean.MeanFactory().create(FLOAT_TYPE, FLOAT_TYPE)\n\n\ndef test_finalizer():\n\n @computations.federated_computation(\n empty_init_fn.type_signature.result,\n computation_types.at_server(MODEL_WEIGHTS_TYPE),\n computation_types.at_server(FLOAT_TYPE))\n def next_fn(state, weights, updates):\n new_weights = intrinsics.federated_map(\n computations.tf_computation(lambda x, y: x + y),\n (weights.trainable, updates))\n new_weights = intrinsics.federated_zip(\n model_utils.ModelWeights(new_weights, ()))\n return measured_process.MeasuredProcessOutput(state, new_weights,\n empty_at_server())\n\n return finalizers.FinalizerProcess(empty_init_fn, next_fn)\n\n\nclass ComposeLearningProcessTest(test_case.TestCase):\n\n def test_learning_process_composes(self):\n process = composers.compose_learning_process(test_init_model_weights_fn,\n test_distributor(),\n test_client_work(),\n test_aggregator(),\n test_finalizer())\n\n self.assertIsInstance(process, learning_process.LearningProcess)\n self.assertEqual(\n process.initialize.type_signature.result.member.python_container,\n composers.LearningAlgorithmState)\n self.assertEqual(\n process.initialize.type_signature.result.member.global_model_weights,\n MODEL_WEIGHTS_TYPE)\n\n # Reported metrics have the expected fields.\n metrics_type = process.next.type_signature.result.metrics.member\n self.assertTrue(structure.has_field(metrics_type, 'distributor'))\n self.assertTrue(structure.has_field(metrics_type, 'client_work'))\n self.assertTrue(structure.has_field(metrics_type, 'aggregator'))\n self.assertTrue(structure.has_field(metrics_type, 'finalizer'))\n self.assertLen(metrics_type, 4)\n\n def test_one_arg_computation_init_raises(self):\n\n @computations.tf_computation(computation_types.TensorType(tf.float32))\n def init_model_weights_fn(x):\n return model_utils.ModelWeights(trainable=x, non_trainable=())\n\n with self.assertRaisesRegex(TypeError, 'Computation'):\n composers.compose_learning_process(init_model_weights_fn,\n test_distributor(), test_client_work(),\n test_aggregator(), test_finalizer())\n\n def test_not_tff_computation_init_raises(self):\n\n def init_model_weights_fn():\n return model_utils.ModelWeights(\n trainable=tf.constant(1.0), non_trainable=())\n\n with self.assertRaisesRegex(TypeError, 'Computation'):\n composers.compose_learning_process(init_model_weights_fn,\n test_distributor(), test_client_work(),\n test_aggregator(), test_finalizer())\n\n def test_federated_init_raises(self):\n\n @computations.federated_computation()\n def init_model_weights_fn():\n return intrinsics.federated_eval(test_init_model_weights_fn,\n placements.SERVER)\n\n with self.assertRaisesRegex(TypeError, 'unplaced'):\n composers.compose_learning_process(init_model_weights_fn,\n test_distributor(), test_client_work(),\n test_aggregator(), test_finalizer())\n\n def test_not_model_weights_init_raises(self):\n\n @computations.tf_computation()\n def init_model_weights_fn():\n return collections.OrderedDict(\n trainable=tf.constant(1.0), non_trainable=())\n\n with self.assertRaisesRegex(TypeError, 'ModelWeights'):\n composers.compose_learning_process(init_model_weights_fn,\n test_distributor(), test_client_work(),\n test_aggregator(), test_finalizer())\n\n def test_not_distributor_type_raises(self):\n distributor = test_distributor()\n bad_distributor = measured_process.MeasuredProcess(distributor.initialize,\n distributor.next)\n with self.assertRaisesRegex(TypeError, 
'DistributionProcess'):\n composers.compose_learning_process(test_init_model_weights_fn,\n bad_distributor, test_client_work(),\n test_aggregator(), test_finalizer())\n\n def test_not_client_work_type_raises(self):\n client_work = test_client_work()\n bad_client_work = measured_process.MeasuredProcess(client_work.initialize,\n client_work.next)\n with self.assertRaisesRegex(TypeError, 'ClientWorkProcess'):\n composers.compose_learning_process(test_init_model_weights_fn,\n test_distributor(), bad_client_work,\n test_aggregator(), test_finalizer())\n\n def test_not_aggregator_type_raises(self):\n aggregator = test_aggregator()\n bad_aggregator = measured_process.MeasuredProcess(aggregator.initialize,\n aggregator.next)\n with self.assertRaisesRegex(TypeError, 'AggregationProcess'):\n composers.compose_learning_process(test_init_model_weights_fn,\n test_distributor(), test_client_work(),\n bad_aggregator, test_finalizer())\n\n def test_unweighted_aggregator_raises(self):\n bad_aggregator = sum_factory.SumFactory().create(FLOAT_TYPE)\n with self.assertRaisesRegex(TypeError, 'weighted'):\n composers.compose_learning_process(test_init_model_weights_fn,\n test_distributor(), test_client_work(),\n bad_aggregator, test_finalizer())\n\n def test_not_finalizer_type_raises(self):\n finalizer = test_finalizer()\n bad_finalizer = measured_process.MeasuredProcess(finalizer.initialize,\n finalizer.next)\n with self.assertRaisesRegex(TypeError, 'FinalizerProcess'):\n composers.compose_learning_process(test_init_model_weights_fn,\n test_distributor(), test_client_work(),\n test_aggregator(), bad_finalizer)\n\n # TODO(b/190334722): Add more tests that assert early errors are raised in the\n # _validate_args method, when adding custom error messages.\n\n\nclass VanillaFedAvgTest(test_case.TestCase, parameterized.TestCase):\n\n def _test_data(self):\n return tf.data.Dataset.from_tensor_slices(\n collections.OrderedDict(\n x=[[1.0, 2.0], [3.0, 4.0]],\n y=[[5.0], [6.0]],\n )).batch(2)\n\n def _test_batch_loss(self, model, weights):\n tf.nest.map_structure(lambda w, v: w.assign(v),\n model_utils.ModelWeights.from_model(model), weights)\n for batch in self._test_data().take(1):\n batch_output = model.forward_pass(batch, training=False)\n return batch_output.loss\n\n def test_loss_decreases(self):\n model_fn = model_examples.LinearRegression\n test_model = model_fn()\n fedavg = composers.build_basic_fedavg_process(\n model_fn=model_fn, client_learning_rate=0.1)\n client_data = [self._test_data()] * 3 # 3 clients with identical data.\n\n state = fedavg.initialize()\n last_loss = self._test_batch_loss(test_model, state.global_model_weights)\n for _ in range(5):\n fedavg_result = fedavg.next(state, client_data)\n state = fedavg_result.state\n metrics = fedavg_result.metrics\n loss = self._test_batch_loss(test_model, state.global_model_weights)\n self.assertLess(loss, last_loss)\n last_loss = loss\n\n self.assertIsInstance(state, composers.LearningAlgorithmState)\n self.assertLen(metrics, 4)\n for key in ['distributor', 'client_work', 'aggregator', 'finalizer']:\n self.assertIn(key, metrics)\n\n def test_created_model_raises(self):\n with self.assertRaises(TypeError):\n composers.build_basic_fedavg_process(model_examples.LinearRegression(),\n 0.1)\n\n @parameterized.named_parameters(('int', 1),\n ('optimizer', sgdm.build_sgdm(0.1)))\n def test_wrong_client_learning_rate_raises(self, bad_client_lr):\n with self.assertRaises(TypeError):\n composers.build_basic_fedavg_process(model_examples.LinearRegression(),\n 
bad_client_lr)\n\n\nif __name__ == '__main__':\n execution_contexts.set_local_python_execution_context()\n test_case.main()\n","sub_path":"tensorflow_federated/python/learning/templates/composers_test.py","file_name":"composers_test.py","file_ext":"py","file_size_in_byte":12327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"153925479","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n# from django.contrib.auth import login, authenticate\n# from django.contrib.auth import update_session_auth_hash\nfrom django.shortcuts import render, redirect\nfrom .forms import SignUpForm, LoginForm, BookForm, BookAuthorForm, UpdateBookForm, EmployeeForm, UpdateEmployeeForm, \\\n UserDataForm\nfrom django.contrib import messages\nfrom django.contrib.auth import update_session_auth_hash, login, authenticate, logout, login as auth_login\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse, JsonResponse\nfrom .models import Book, Book_Author, Employee, User_Data\nfrom django import template\nfrom django.contrib.auth.models import Group\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nimport json\n\n\n# Create your views here.\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n fs = form.save(commit=False)\n fs.current_user = request.user.id\n fs.save()\n messages.add_message(request, messages.INFO,\n 'You have been successfully registered, now you can Login.')\n return redirect('user_login')\n else:\n form = SignUpForm()\n return render(request, 'signup.html', {'form': form})\n\n\ndef user_login(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user:\n auth_login(request, user)\n return redirect(home)\n else:\n messages.add_message(request, messages.INFO, 'The username and/or password you specified are not correct.')\n return redirect('user_login')\n else:\n form = LoginForm()\n return render(request, 'login.html', {'form': form})\n\n\n@login_required\ndef user_logout(request):\n logout(request)\n return redirect(user_login)\n\n\n# register = template.Library()\n\n\n@login_required\ndef home(request):\n return render(request, 'home.html', {})\n\n\n@login_required\ndef add_book_author(request):\n if request.method == 'POST':\n form = BookAuthorForm(request.POST)\n if form.is_valid():\n fs = form.save(commit=False)\n fs.current_user = request.user.id\n fs.save()\n\n messages.add_message(request, messages.SUCCESS, 'Added Book Author Successfully...!')\n return redirect(add_book_author)\n else:\n form = BookAuthorForm()\n return render(request, 'add_book_author.html', {'form': form})\n\n\n@login_required\ndef add_book(request):\n if request.method == 'POST':\n form = BookForm(request.POST, request.FILES)\n if form.is_valid():\n fs = form.save(commit=False)\n fs.current_user = request.user.id\n fs.save()\n messages.add_message(request, messages.INFO, 'Saved Successfully...!')\n return redirect(add_book)\n else:\n if request.method == 'POST':\n form = BookForm()\n book_obj = Book.objects.filter(current_user=request.user.id)\n page = request.GET.get('page', 1)\n paginator = Paginator(book_obj, 3)\n user = paginator.page(page)\n\n book_title = request.POST.get('book_title')\n series = request.POST.get('series')\n author_name = request.POST.get('author_name')\n pages = request.POST.get('pages')\n\n if book_title and series:\n search_book_obj = Book.objects.filter(book_title__icontains=book_title, series=series,\n current_user=request.user.id)\n return render(request, 
'add_book.html',\n {'form': form, 'books': user, 'search_book': search_book_obj})\n\n elif series:\n search_book_obj = Book.objects.filter(series__icontains=series, current_user=request.user.id)\n return render(request, 'add_book.html',\n {'form': form, 'books': user, 'search_book': search_book_obj})\n\n elif author_name:\n search_book_obj1 = Book_Author.objects.filter(name__icontains=author_name,\n current_user=request.user.id)\n if search_book_obj1:\n search_book_obj = Book.objects.filter(author_name=search_book_obj1,\n current_user=request.user.id)\n return render(request, 'add_book.html',\n {'form': form, 'books': user, 'search_book': search_book_obj})\n else:\n return render(request, 'add_book.html', {'form': form, 'books': user})\n\n elif pages:\n search_book_obj = Book.objects.filter(pages__icontains=pages, current_user=request.user.id)\n return render(request, 'add_book.html',\n {'form': form, 'books': user, 'search_book': search_book_obj})\n\n elif book_title:\n search_book_obj = Book.objects.filter(book_title__icontains=book_title,\n current_user=request.user.id)\n return render(request, 'add_book.html',\n {'form': form, 'books': user, 'search_book': search_book_obj})\n else:\n return render(request, 'add_book.html', {'form': form, 'books': user})\n else:\n return redirect('add_book')\n else:\n form = BookForm()\n\n book_obj = Book.objects.filter(current_user=request.user.id)\n\n page = request.GET.get('page', 1)\n paginator = Paginator(book_obj, 3)\n try:\n user = paginator.page(page)\n except PageNotAnInteger:\n user = paginator.page(1)\n except EmptyPage:\n user = paginator.page(paginator.num_pages)\n return render(request, 'add_book.html', {'form': form, 'books': user})\n\n\n@login_required\ndef delete_book(request, id):\n book_obj = Book.objects.get(id=id)\n book_obj.delete()\n messages.add_message(request, messages.INFO, 'Delete Successfully...!')\n return redirect('add_book')\n\n\n@login_required\ndef update_book(request, id):\n book_obj = Book.objects.get(id=id)\n if request.method == 'POST':\n form = UpdateBookForm(request.POST, instance=book_obj)\n if form.is_valid():\n fs = form.save(commit=False)\n fs.current_user = request.user.id\n fs.save()\n messages.add_message(request, messages.INFO, 'Update Successfully...!')\n return redirect('add_book')\n return render(request, 'update_book.html', {'form': form})\n else:\n form = UpdateBookForm(instance=book_obj)\n return render(request, 'update_book.html', {'form': form})\n\n\n@login_required\ndef change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n fs = form.save(commit=False)\n fs.current_user = request.user.id\n fs.save()\n update_session_auth_hash(request, request.user)\n messages.success(request, _('Your password was successfully updated!'))\n return redirect('change_password')\n else:\n messages.error(request, _('Please correct the error.'))\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'change_password.html', {'form': form})\n\n\n@login_required\ndef group(request):\n group_obj = Group.objects.filter(current_user=request.user.id)\n group_obj2 = Group.objects.get(name='All_User')\n group_obj3 = Group.objects.filter(name='Test')\n group = Group.objects.get(name='MyGroup')\n user = User.objects.get(username='gaurav')\n user.groups.add(group)\n return HttpResponse(\"Group test...!\")\n\n\n@login_required\ndef add_employee(request):\n emp_obj = None\n if request.method == 'POST':\n form = 
EmployeeForm(request.POST)\n if form.is_valid():\n fs = form.save(commit=False)\n fs.current_user = request.user.id\n fs.save()\n # form.save()\n messages.add_message(request, messages.INFO, 'Added successfully...!')\n return redirect('add_employee')\n else:\n form = EmployeeForm()\n emp_obj = Employee.objects.filter(current_user=request.user.id)\n return render(request, 'add_employee.html', {'form': form, 'employee': emp_obj})\n\n\n@login_required\ndef delete_employee(request, id):\n emp_obj = Employee.objects.get(id=id)\n emp_obj.delete()\n messages.add_message(request, messages.INFO, 'Delete Successfully...!')\n return redirect('add_employee')\n\n\n@login_required\ndef edit_employee(request, id):\n emp_obj = Employee.objects.get(id=id)\n if request.method == 'POST':\n form = UpdateEmployeeForm(request.POST, instance=emp_obj)\n if form.is_valid():\n fs = form.save(commit=False)\n fs.current_user = request.user.id\n fs.save()\n messages.add_message(request, messages.INFO, 'Update Successfully...!')\n return redirect('add_employee')\n return render(request, 'update_employee.html', {'form': form})\n else:\n form = UpdateEmployeeForm(instance=emp_obj)\n return render(request, 'update_employee.html', {'form': form})\n\n\n@login_required\ndef user_data(request):\n if request.method == 'POST':\n form = UserDataForm(request.POST)\n if form.is_valid():\n fs = form.save(commit=False)\n fs.current_user = request.user.id\n fs.save()\n\n username = request.POST.get('username')\n age = request.POST.get('age')\n\n messages.add_message(request, messages.INFO, 'Added successfully...!')\n return redirect('userdata')\n else:\n form = UserDataForm()\n return render(request, 'userdata.html', {'form': form})\n\n\n@login_required\ndef get_book_data(request):\n data = json.loads(request.POST.get(\"data\"))\n print(data)\n if data:\n book_title = None\n author_name = None\n book_image = None\n book_id = None\n series = None\n description = None\n pages = None\n for item in data:\n book_title = item.get('ajax_book_title', None)\n\n image = item.get('ajax_book_image', None)\n book_image1 = image.split('/', 2)[2]\n book_image = \"/\" + book_image1\n\n book_id = item.get('ajax_id', None)\n author_name = item.get('ajax_author_name', None)\n series = item.get('ajax_series', None)\n description = item.get('ajax_description', None)\n pages = item.get('ajax_pages', None)\n\n if book_title:\n book_author_obj = Book_Author.objects.get(name=author_name)\n book_obj = Book(book_title=book_title, book_image=book_image, id=book_id, series=series,\n description=description, pages=pages, author_name=book_author_obj,\n current_user=request.user.id)\n book_obj.save()\n return JsonResponse(True, safe=False)\n\n else:\n return JsonResponse(False, safe=False)\n\n\n# @login_required\n# def get_book_data(request):\n# \tif request.method == 'POST':\n# \t\tif request.is_ajax():\n# \t\t\tbook_title = request.POST.get('ajax_book_title')\n# \t\t\tbook_image = request.FILES.get('ajax_book_image')\n# \t\t\tbook_id \t= request.POST.get('ajax_id')\n# \t\t\tauthor_name = request.POST.get('ajax_author_name')\n# \t\t\tseries \t \t= request.POST.get('ajax_series')\n# \t\t\tdescription = request.POST.get('ajax_description')\n# \t\t\tpages \t\t= request.POST.get('ajax_pages')\n#\n# \t\t\tif book_title:\n# \t\t\t\tbook_author_obj = Book_Author.objects.get(name=author_name)\n# \t\t\t\tbook_obj = Book(book_title=book_title, book_image=book_image, id=book_id, series=series,\n# \t\t\t\t\t\t\t\tdescription=description, pages=pages, 
author_name=book_author_obj)\n# \t\t\t\tbook_obj.save()\n# \t\t\t\treturn JsonResponse(True, safe=False)\n# \t\t\telse:\n# \t\t\t\treturn JsonResponse(False, safe=False)\n# \t\telse:\n# \t\t\tprint \"Ajax not working\"\n# \telse:\n# \t\t\tprint \"Ajax not working\"\n\n\n@login_required\ndef delete_book_data(request):\n if request.method == 'POST':\n if request.is_ajax():\n book_id = request.POST.get('id')\n if book_id:\n book_obj = Book.objects.get(id=book_id)\n book_obj.delete()\n return JsonResponse(True, safe=False)\n else:\n return JsonResponse(False, safe=False)\n # Non-AJAX or non-POST requests also get an explicit response\n return JsonResponse(False, safe=False)\n","sub_path":"Project/Book_Mgnt_System/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"278589096","text":"import torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import interpolate\n\n\nclass RandomSplineSCM(nn.Module): \n def __init__(self, span=6, num_anchors=10, order=3, range_scale=1.,\n input_noise=False, output_noise=True):\n super(RandomSplineSCM, self).__init__()\n\n self._span = span\n self._num_anchors = num_anchors\n self._range_scale = range_scale\n self._x = np.linspace(-span, span, num_anchors)\n self._y = np.random.uniform(-range_scale * span, range_scale * span, size=(num_anchors,))\n self._spline_spec = interpolate.splrep(self._x, self._y, k=order)\n\n self.input_noise = input_noise\n self.output_noise = output_noise\n\n def forward(self, x, z=None):\n if z is None:\n z = self.sample(x.size())\n if self.input_noise:\n x = x + z\n _x_np = x.detach().cpu().numpy().squeeze()\n _y_np = interpolate.splev(_x_np, self._spline_spec)\n _y = torch.from_numpy(_y_np).view(-1, 1).float()\n y = _y + z if self.output_noise else _y\n return y\n\n @staticmethod\n def sample(input_size):\n return torch.normal(torch.zeros(*input_size), torch.ones(*input_size))\n \n def plot(self, x, title='Samples from the SCM', label=None, **kwargs):\n y = self.forward(x)\n plt.figure()\n plt.scatter(x.squeeze().numpy(), y.squeeze().numpy(), marker='+', label=label, **kwargs)\n plt.title(title)\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.show()\n\n\ndef generate_data_categorical(num_samples, pi_A, pi_B_A):\n \"\"\"\n Sample data using ancestral sampling\n x_A ~ Categorical(pi_A)\n x_B ~ Categorical(pi_B_A[x_A])\n \"\"\"\n N = pi_A.shape[0]\n r = np.arange(N)\n\n x_A = np.dot(np.random.multinomial(1, pi_A, size=num_samples), r)\n x_Bs = np.zeros((num_samples, N), dtype=np.int64)\n for i in range(num_samples):\n x_Bs[i] = np.random.multinomial(1, pi_B_A[x_A[i]], size=1)\n x_B = np.dot(x_Bs, r)\n\n return np.vstack((x_A, x_B)).T.astype(np.int64)\n\n\ndef sample_from_normal(mean, std, nsamples, n_features):\n return torch.normal(torch.ones(nsamples, n_features) * mean,\n torch.ones(nsamples, n_features) * std)\n","sub_path":"causal_meta/utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"33963083","text":"import numpy as np\n# from math import pi, sqrt\nfrom pymoab import core, types, rng, topo_util, skinner\n# import time\n# import pyximport; pyximport.install()\nimport os\n# from PyTrilinos import Epetra, AztecOO, EpetraExt # , Amesos\n# import math\n# import os\n# import shutil\n# import random\nimport sys\n# import configparser\nimport io\nimport yaml\nimport scipy.sparse as sp\n\n\ndef get_OR_classic_nv1(mb, all_volumes, ID_reord_tag, primal_id_tag1, fine_to_primal1_classic_tag):\n meshsets_nv1 = mb.get_entities_by_type_and_tag(0, types.MBENTITYSET, np.array([primal_id_tag1]), np.array([None]))\n OR1 = sp.lil_matrix((len(meshsets_nv1), len(all_volumes)))\n for m in meshsets_nv1:\n elems = mb.get_entities_by_handle(m)\n gids = mb.tag_get_data(ID_reord_tag, elems, flat=True)\n nc = mb.tag_get_data(fine_to_primal1_classic_tag, elems, flat=True)\n OR1[nc, gids] = np.ones(len(elems))\n\n return OR1\n\ndef get_OR_classic_nv2(mb, primal_id_tag1, primal_id_tag2):\n meshsets_nv2 = mb.get_entities_by_type_and_tag(0, types.MBENTITYSET, np.array([primal_id_tag2]), np.array([None]))\n meshsets_nv1 = mb.get_entities_by_type_and_tag(0, types.MBENTITYSET, np.array([primal_id_tag1]), np.array([None]))\n OR = sp.lil_matrix((len(meshsets_nv2), len(meshsets_nv1)))\n\n for m2 in meshsets_nv2:\n childs = mb.get_child_meshsets(m2)\n nc2 = mb.tag_get_data(primal_id_tag2, m2, flat=True)[0]\n nc1 = np.array([mb.tag_get_data(primal_id_tag1, child, flat=True)[0] for child in childs])\n nc2 = np.repeat(nc2, len(nc1))\n OR[nc2, nc1] = np.ones(len(nc1))\n\n return OR\n","sub_path":"utils/restriction_classic.py","file_name":"restriction_classic.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"9148990","text":"from django.db import models\nimport os\nimport re\n\n# Create your models here.\n\n\"\"\"\nIt must start with a 4, 5 or 6.\nIt must contain exactly 16 digits.\nIt must only consist of digits (0-9).\nIt may have digits in groups of 4, separated by one hyphen \"-\".\nIt must NOT use any other separator like ' ' , '_', etc.\nIt must NOT have 4 or more consecutive repeated digits.\nValid Credit Card Numbers\n4253625879615786\n4424424424442444\n5122-2368-7954-3214\n\"\"\"\n\ndef upload_to_arquivo(instance, name):\n extesion = (os.path.splitext(name)[-1]).replace('.', '')\n return os.path.join('media', 'files', '%s.%s' % (instance.auto_date, extesion))\n\nclass Validate(models.Model):\n auto_date = models.DateTimeField(auto_now_add=True)\n lines = models.IntegerField(blank=True, null=True)\n text_file = models.FileField(upload_to=upload_to_arquivo)\n\n def __str__(self):\n return '#%s%s%s' % (self.pk, self.auto_date.year, self.auto_date.month)\n\n def validate_card_file(self):\n try:\n validated_numbers = []\n readlines = self.text_file.readlines()\n first_line = int(str(readlines[0].decode(\"utf-8\")).strip())\n if 0 < first_line < 100:\n readlines.pop(0)\n self.lines = first_line\n self.save()\n for number in readlines:\n validated_numbers.append(self.regex_credit_card(number))\n return validated_numbers\n else:\n validated_numbers.append(['The first line has to be greater than zero and less than one hundred: %s' % first_line, False])\n return validated_numbers\n\n except ValueError:\n validated_numbers.append(['The first line has to be a number.', False])\n return validated_numbers\n\n except Exception as inst:\n validated_numbers.append(['ERROR: %s' % str(inst), False])\n return validated_numbers\n\n def regex_credit_card(self, number):\n try:\n regex = re.compile(r\"^[456]([\\d]{15}|[\\d]{3}(-[\\d]{4}){3})$\")\n regex_consect = re.compile(r\"(?:([0-9])\\1{3,})\")\n number = str(number.decode(\"utf-8\")).strip()\n if not bool(regex_consect.search(number.replace('-',''))):\n return [number, bool(regex.match(number))]\n else:\n return [number, False]\n\n except Exception as inst:\n return ['ERROR: %sn' % str(inst), False]\n","sub_path":"validate/credit_card/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"83674702","text":"import setuptools\nfrom textwrap import wrap\ndef NewVersion():\n try:\n f = open(\"nbapi/version.txt\",\"r+\")\n v1 = str(f.read()).replace(\".\",\"\")\n temp = str(int(v1) + 1)\n temp1 = wrap(temp,1)\n print(temp1)\n out = \".\".join(temp1)\n f.seek(0)\n f.truncate()\n f.write(out)\n f.close()\n print(out)\n return out\n except:\n return \"5644\"\n\n \n \n\n\n\n\nlong_description = \"Simple Anime API package\"\nversionF=NewVersion()\nsetuptools.setup(\n name=\"nbapi\",\n version=versionF,\n author=\"LazyNeko\",\n author_email=\"nekobot.help@gmail.com\",\n description=\"A small API for anime/nekos\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/LazyNeko1/nbapi\",\n #packages=[\"nbapi\"],\n install_requires=['aiohttp','requests','asyncio'],\n package_data={\n '':['nbapi/*.py','nbapi/*.txt']\n },\n py_modules=['nbapi/__init__',\n 'nbapi/apil',\n 'nbapi/rimg',\n 'nbapi/simg',\n 'nbapi/version'\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\"\n ]\n)\n","sub_path":"pypi_install_script/nbapi-1.9.0.4.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"146675293","text":"import uuid\nfrom random import Random\n\nfrom faker import Faker\nfrom flask import Blueprint, current_app, g, jsonify, request\nfrom itsdangerous import BadData, SignatureExpired\nfrom marshmallow import fields\nfrom sqlalchemy.orm import selectinload\nfrom werkzeug.exceptions import Forbidden, UnprocessableEntity\n\nfrom .core.auth import user_info_from_app_token\nfrom .core.db import db\nfrom .core.util import DATE_FORMAT, format_dt\nfrom .core.webargs import abort, use_args, use_kwargs\nfrom .models import Newdle, Participant\nfrom .schemas import (\n MyNewdleSchema,\n NewdleSchema,\n NewNewdleSchema,\n ParticipantSchema,\n RestrictedNewdleSchema,\n UpdateNewdleSchema,\n UpdateParticipantSchema,\n UserSchema,\n UserSearchResultSchema,\n)\n\n\napi = Blueprint('api', __name__, url_prefix='/api')\n\n\ndef allow_anonymous(fn):\n fn._allow_anonymous = True\n return fn\n\n\n@api.errorhandler(UnprocessableEntity)\ndef _handle_webargs_error(exc):\n data = getattr(exc, 'data', None)\n if data and 'messages' in data:\n return jsonify(error='invalid_args', messages=data['messages']), exc.code\n return jsonify(error=exc.description), exc.code\n\n\n@api.before_request\ndef require_token():\n g.user = None\n auth = request.headers.get('Authorization')\n token = None\n if auth and auth.startswith('Bearer '):\n token = auth[7:]\n if not token:\n view_func = current_app.view_functions[request.endpoint]\n if getattr(view_func, '_allow_anonymous', False):\n return\n return jsonify(error='token_missing'), 401\n try:\n user = user_info_from_app_token(token)\n except SignatureExpired:\n return jsonify(error='token_expired'), 401\n except BadData:\n return jsonify(error='token_invalid'), 401\n g.user = user\n\n\n@api.route('/me/')\ndef me():\n return UserSchema().jsonify(g.user)\n\n\ndef _generate_fake_users():\n f = Faker()\n f.seed_instance(0)\n\n def _generate_fake_user():\n first_name = f.first_name()\n last_name = f.last_name()\n email = f'{first_name}.{last_name}@{f.domain_name()}'.lower()\n return {\n 'first_name': first_name,\n 'last_name': last_name,\n 'email': email,\n 'uid': str(uuid.uuid4()),\n }\n\n return [_generate_fake_user() for _ in range(100)] + [\n {\n 'first_name': g.user['first_name'],\n 'last_name': g.user['last_name'],\n 'email': g.user['email'],\n 'uid': g.user['uid'],\n }\n ]\n\n\ndef _match(user, query):\n parts = query.lower().split()\n name = f'{user[\"first_name\"]} {user[\"last_name\"]}'.lower()\n return any(\n all(p in field for p in parts) for field in (name, user['email'].lower())\n )\n\n\n@api.route('/users/')\n@use_kwargs({'q': fields.String(required=True)})\ndef users(q):\n data = [x for x in _generate_fake_users() if _match(x, q)]\n return {\n 'total': len(data),\n 'users': UserSearchResultSchema(many=True).dump(data[:10]),\n }\n\n\n@api.route('/users/busy')\n@use_kwargs(\n {\n 'date': fields.Date(format=DATE_FORMAT, required=True),\n 'email': fields.String(required=True),\n }\n)\ndef get_busy_times(date, email):\n rnd = Random(date.isoformat() + email)\n if rnd.randint(0, 1):\n start = rnd.randint(5, 22)\n end = rnd.randint(start + 1, 24)\n return jsonify([[start, end]])\n else:\n start = rnd.randint(7, 10)\n end = rnd.randint(start + 1, start + 3)\n start2 = rnd.randint(14, 16)\n end2 = rnd.randint(start2 + 1, start2 + 5)\n return jsonify([[start, end], [start2, end2]])\n\n\n@api.route('/newdles/mine')\ndef get_my_newdles():\n newdle = (\n Newdle.query.options(selectinload('participants'))\n .filter_by(creator_uid=g.user['uid'])\n 
.order_by(Newdle.final_dt.isnot(None), Newdle.final_dt.desc(), Newdle.id.desc())\n .all()\n )\n return MyNewdleSchema(many=True).jsonify(newdle)\n\n\n@api.route('/newdle/', methods=('POST',))\n@use_kwargs(NewNewdleSchema(), locations=('json',))\ndef create_newdle(title, duration, timezone, timeslots, participants):\n newdle = Newdle(\n title=title,\n creator_uid=g.user['uid'],\n creator_name=f'{g.user[\"first_name\"]} {g.user[\"last_name\"]}',\n duration=duration,\n timezone=timezone,\n timeslots=timeslots,\n participants={Participant(**p) for p in participants},\n )\n db.session.add(newdle)\n db.session.commit()\n return NewdleSchema().jsonify(newdle)\n\n\n@api.route('/newdle/')\n@allow_anonymous\ndef get_newdle(code):\n newdle = Newdle.query.filter_by(code=code).first_or_404('Invalid code')\n restricted = not g.user or newdle.creator_uid != g.user['uid']\n schema_cls = RestrictedNewdleSchema if restricted else NewdleSchema\n return schema_cls().jsonify(newdle)\n\n\n@api.route('/newdle/', methods=('PATCH',))\n@use_args(UpdateNewdleSchema(), locations=('json',))\ndef update_newdle(args, code):\n newdle = Newdle.query.filter_by(code=code).first_or_404('Invalid code')\n if newdle.creator_uid != g.user['uid']:\n raise Forbidden\n for key, value in args.items():\n setattr(newdle, key, value)\n db.session.commit()\n return NewdleSchema().jsonify(newdle)\n\n\n@api.route('/newdle//participants/')\n@allow_anonymous\ndef get_participant(code, participant_code):\n participant = Participant.query.filter(\n Participant.newdle.has(Newdle.code == code),\n Participant.code == participant_code,\n ).first_or_404('Invalid code')\n return ParticipantSchema().jsonify(participant)\n\n\n@api.route('/newdle//participants/', methods=('PATCH',))\n@allow_anonymous\n@use_args(UpdateParticipantSchema(), locations=('json',))\ndef update_participant(args, code, participant_code):\n participant = Participant.query.filter(\n Participant.newdle.has(Newdle.code == code),\n Participant.code == participant_code,\n ).first_or_404('Invalid code')\n if 'answers' in args:\n # We can't validate this in webargs, since we don't have access\n # to the Newdle inside the schema...\n invalid = args['answers'].keys() - set(participant.newdle.timeslots)\n if invalid:\n abort(\n 422,\n messages={\n 'answers': {\n format_dt(key): {'key': ['Invalid timeslot']} for key in invalid\n }\n },\n )\n for key, value in args.items():\n setattr(participant, key, value)\n db.session.commit()\n return ParticipantSchema().jsonify(participant)\n","sub_path":"newdle/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"158385446","text":"from sklearn import metrics\nimport numpy as np, pandas as pd\nimport os, shutil\n\n\nindicators = [['LiveBirths18', 'municip', 'cparish']]\n\n\ndef rmse_error(actual, predicted):\n return np.sqrt(metrics.mean_squared_error(actual, predicted))\n\ndef mae_error(actual, predicted):\n return metrics.mean_absolute_error(actual, predicted)\n\ndef nrmse_error(actual, predicted):\n range = max(actual) - min(actual)\n return (np.sqrt(metrics.mean_squared_error(actual, predicted)))/range\n\ndef nmae_error(actual, predicted):\n range = max(actual) - min(actual)\n return (metrics.mean_absolute_error(actual, predicted))/range\n\n\nfor indicator in indicators:\n print('\\n\\n--- EVALUATING', indicator[0])\n\n # Read census .csv\n data_census = pd.read_csv(os.path.join('Statistics', indicator[0], indicator[2] + '.csv'), sep=';', index_col=False)\n data_census[indicator[1].upper()] = data_census[indicator[1].upper()].str.upper()\n data_census[indicator[2].upper()] = data_census[indicator[2].upper()].str.upper()\n\n path = os.path.join('Estimates', indicator[0], '2Evaluate')\n newpath = os.path.join('Estimates', indicator[0])\n\n file_names = os.listdir(path)\n for file in file_names:\n if file.endswith('.csv'):\n filee = os.path.join(path, file)\n newpathe = os.path.join(newpath, file)\n\n data_estimated = pd.read_csv(filee, sep=\";\")\n data_estimated[indicator[1].upper()] = data_estimated[indicator[1].upper()].str.upper()\n data_estimated[indicator[2].upper()] = data_estimated[indicator[2].upper()].str.upper()\n\n actual = []\n predicted = []\n for index, row in data_estimated.iterrows():\n name_ab1 = row[indicator[1].upper()]\n name_ab2 = row[indicator[2].upper()]\n predicted_value = row['VALUE']\n\n actual_value = data_census.loc[(data_census[indicator[1].upper()] == name_ab1) &\n (data_census[indicator[2].upper()] == name_ab2), 'VALUE']\n\n if (len(actual_value.index) == 1):\n actual.append(actual_value.values[0])\n predicted.append(predicted_value)\n\n r1 = round(rmse_error(actual, predicted), 1)\n r2 = round(mae_error(actual, predicted), 1)\n r3 = round(nrmse_error(actual, predicted), 4)\n r4 = round(nmae_error(actual, predicted), 4)\n\n print('\\n- Error metrics (' + file + '):')\n print('-', r1, '&', r2, '&', r3, '&', r4)\n\n shutil.copyfile(filee, newpathe)\n os.remove(filee)\n","sub_path":"evaluateResults.py","file_name":"evaluateResults.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"14532608","text":"import turtle as t\nimport math\n\nsize1 = 100\nnum_of_side = 4\nsize2 = math.sqrt(size1**2 + size1**2)\nsize3 = math.sqrt(size1**2 + size1**2)/2\nangle1 = 90\nangle2 = 45\nangle3 = 135\ncolor = 'blue'\n\nt.color(color)\nt.pensize(5)\n\nt.left(angle1)\nt.forward(size1)\nt.right(angle1)\nt.forward(size1)\nt.right(angle3)\nt.forward(size2)\nt.left(angle3)\nt.forward(size1)\nt.left(angle3)\nt.forward(size2)\nt.right(angle1)\nt.forward(size3)\nt.right(angle1)\nt.forward(size3)\n\nt.right(angle2)\nt.forward(size1)\nt.done()","sub_path":"Python code/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"217623652","text":"import requests\nimport json\n\nmerakikey = \"6bec40cf957de430a6f1f2baa056b99a4fac9ea0\"\nbase_url = 'https://api.meraki.com/api/v0'\nendpoint = '/organizations'\n\nheaders = {\n 'X-Cisco-meraki-API-Key': merakikey\n}\n\n\ntry:\n response = requests.get(url=f\"{base_url}{endpoint}\", headers=headers)\n if response.status_code == 200:\n orgs = response.json()\n print(orgs)\nexcept Exception as ex:\n print(ex)","sub_path":"stage0.py","file_name":"stage0.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"587882904","text":"class DiasTrabalhados:\r\n \r\n def consid_bissexto(self):\r\n while True:\r\n try:\r\n self.bissexto= input(\"Considerar nos cálculos o ano bissexto? Sim ou Não: \")\r\n self.bissexto=self.bissexto.lower() \r\n if self.bissexto == \"sim\" or self.bissexto.startswith(\"s\"):\r\n self.bissexto=True\r\n break\r\n elif self.bissexto == \"não\" or self.bissexto.startswith(\"n\"):\r\n self.bissexto=False\r\n break\r\n else:\r\n print('Entrada inválida, digite \"Sim\" ou \"Não\"')\r\n except:\r\n continue\r\n return self.bissexto\r\n\r\n\r\n def trabalha_feriados(self):\r\n while True:\r\n try:\r\n self.trabalho= input(\"Considerar o trabalho nos feriados? Sim ou Não: \")\r\n self.trabalho=self.trabalho.lower() \r\n if self.trabalho == \"sim\" or self.trabalho.startswith(\"s\"):\r\n self.trabalho=True\r\n break\r\n elif self.trabalho == \"não\" or self.trabalho.startswith(\"n\"):\r\n self.trabalho=False\r\n break\r\n else:\r\n print('Entrada inválida, digite \"Sim\" ou \"Não\"')\r\n except:\r\n continue\r\n return self.trabalho\r\n\r\n\r\n def dias_feriados_ano(self):\r\n while True:\r\n try:\r\n if self.trabalho==False:\r\n self.feriados= int(input(\"Considerar quantos dias de feriados por ano: \"))\r\n break\r\n elif self.trabalho==True:\r\n self.feriados=0\r\n break\r\n except ValueError:\r\n print(\"Erro!!! o numero tem que ser inteiro e maior ou igual do que um\")\r\n continue\r\n return self.feriados\r\n\r\n\r\n def dias_trabalhados(self):\r\n if self.bissexto==True:\r\n self.dias=((365*3+366)/4-52-self.feriados)/12\r\n elif self.bissexto==False:\r\n self.dias=(365-52-self.feriados)/12\r\n return \"%.2f\"%self.dias\r\n\r\n\r\n\r\nt1=DiasTrabalhados()\r\nprint(t1.consid_bissexto())\r\nprint(t1.trabalha_feriados())\r\nprint(t1.dias_feriados_ano())\r\nprint(t1.dias_trabalhados())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"2015/ClassDiasTrabalhados.py","file_name":"ClassDiasTrabalhados.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"307581275","text":"\ndef access(flow, filter_flow=False):\n wdist_specification = (\n \"wdist = distance_d_to_c * mass_destination_d_to_c\"\n \"* (origin != destination) * (origin != competitor)\"\n )\n if filter_flow:\n wdist_specification += \" * (weight_o_to_c > 0)\"\n\n return (\n flow.query(\"origin != destination\")\n .groupby(\"origin\")\n # this gives us a set of the \"reachable\" destinations from origin o,\n # which is also the set of competitors to d\n .destination.agg(set)\n .to_frame(\"competitor\")\n # merge competitors back to each flow, giving the competitors to each destination:\n .merge(flow, left_index=True, right_on=\"destination\", how=\"right\")\n # this duplicates flow o->d for each competitor of destination d\n .explode(\"competitor\")\n # this looks up the flow from origin to competitor and gets their weight.\n .merge(\n flow,\n left_on=(\"origin\", \"competitor\"),\n right_on=(\"origin\", \"destination\"),\n suffixes=(\"\", \"_o_to_c\"),\n )\n # this looks up the flow from destination to competitor and gets their weight\n .merge(\n flow,\n left_on=(\"destination\", \"competitor\"),\n right_on=(\"origin\", \"destination\"),\n suffixes=(\"\", \"_d_to_c\"),\n )\n # this constructs the mass * distance term, but forces it to be zero when:\n # there is no flow from the origin to the competitor\n # the origin is its own competitor\n # the origin is its own destination\n .eval(wdist_specification)\n # now, grouping by flow o -> d lets us compute the sum of wdist,\n # which has already zeroed out competitors with no flow from origin o\n .groupby([\"origin\", \"destination\"])\n .wdist.sum()\n # cleaning this up and merging it back into the data frame:\n .reset_index()\n .rename(columns=dict(wdist=\"accessibility\"))\n )\n\nif __name__ == \"__main__\":\n import data\n a1 = access(data.toy())\n\n numpy.testing.assert_array_equal(\n a1.accessibility,\n [\n 0, # A_aa is always zero\n 30 * 20, # A_ab is mass of c times distance from b to c\n 25 * 20, # A_ac is mass of b times distance from c to b\n 30 * 10, # A_ba is mass of c times distance from a to c\n 0, # A_bb is always zero\n 60 * 10, # A_bc is mass of a times the distance from c to a\n 25 * 2, # A_ca is mass of b times distance from a to b\n 60 * 2, # A_cb is mass of a times distance from b to a\n 0, # A_cc is always zero\n ],\n )\n print(\"passed no filter.\")\n a2 = access_slow(data.toy(), filter_flow=True)\n\n numpy.testing.assert_array_equal(\n a2.accessibility,\n [\n 0, # A_aa is always zero\n 30 * 20, # A_ab is mass of c times distance from b to c if flow a -> c\n 25 * 20 * 0, # A_ac is mass of b times distance from c to b if flow a -> b\n 30 * 10, # A_ba is mass of c times distance from a to c if flow b -> c\n 0, # A_bb is always zero\n 60 * 10, # A_bc is mass of a times the distance from c to a if flow b -> a\n 25 * 2, # A_ca is mass of b times distance from c to b if flow c -> b\n 60 * 10 * 0, # A_cb has no routes from c to a if flow c -> a\n 0, # A_cc is always zero\n ],\n )\n print(\"passed with filter.\")\n\n","sub_path":".ipynb_checkpoints/access_tab-checkpoint.py","file_name":"access_tab-checkpoint.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"611618002","text":"import os\nimport frontmatter\nfrom jinja2 import Template\nfrom jinja2 import Environment, PackageLoader, FileSystemLoader\nfrom pprint import pprint\n\n\nclass FrontMatterTemplate(Template):\n def __init__(self, *args, **kwargs):\n super(FrontMatterTemplate, self).__init__(*args, **kwargs)\n\n def render(self, **kwargs):\n if ((self.filename and\n self.filename != \"\") and\n \"content\" not in kwargs):\n message = frontmatter.load(self.filename)\n\n if \"page\" not in kwargs:\n kwargs[\"page\"] = {}\n\n for key in message.keys():\n kwargs[\"page\"][key] = message[key]\n\n if \"layout\" in kwargs[\"page\"]:\n template = env.from_string(str(message.content))\n template.environment = env\n content = template.render(**kwargs)\n template2 = env.get_template(\"_layouts/\" + kwargs[\"page\"][\"layout\"] + \".html\")\n kwargs[\"content\"] = content\n return template2.render(**kwargs)\n else:\n template = env.from_string(str(message.content))\n template.environment = env\n return template.render(**kwargs)\n else:\n return super(FrontMatterTemplate, self).render(**kwargs)\n\nloader = FileSystemLoader(\n [os.path.join(os.path.dirname(__file__), \"../templates/_includes\"),\n os.path.join(os.path.dirname(__file__), \"../templates/\")])\nenv = Environment(loader=loader)\n","sub_path":"vanteem/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"307537733","text":"# -*- coding:utf8 -*-\n# ==============================================================================\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nThis module implements data process strategies.\n\"\"\"\nimport sys\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nimport os\nimport json\nimport logging\nimport numpy as np\nfrom collections import Counter\nimport jieba\nimport re\n\n\nclass Dataset(object):\n \"\"\"\n This module implements the APIs for loading and using baidu reading comprehension dataset\n \"\"\"\n\n def __init__(self, args):\n self.logger = logging.getLogger(\"alibaba\")\n self.args = args\n if self.args.predict:\n self.test_sets = self._load_test_dataset(args.preposs_file)\n else:\n self.data_sets = self._load_dataset(args.preposs_file)\n self.train_set, self.dev_set = self._shuffle_and_split_data_set(self.data_sets)\n\n def _load_dataset(self, data_path):\n \"\"\"\n Loads the dataset\n Args:\n data_path: the data file to load\n \"\"\"\n with open(data_path, \"r\") as fin:\n data_set = []\n for idx, line in enumerate(fin):\n line = unicode(line, encoding=\"utf8\")\n sample = {}\n line_list = str(line).strip().split(\"|\")\n if len(line_list) != 4:\n self.logger.warning(\"第{}行数据格式错误\".format(idx + 1))\n continue\n else:\n sample[\"id\"] = line_list[0]\n sample[\"document1\"] = [\n unicode(_, \"utf8\") for _ in line_list[1].split(\" \")\n ]\n sample[\"document1_character\"] = self._add_character(\n line_list[1].split(\" \")\n )\n sample[\"document2\"] = [\n unicode(_, \"utf8\") for _ in line_list[2].split(\" \")\n ]\n sample[\"document2_character\"] = self._add_character(\n line_list[2].split(\" \")\n )\n sample[\"label\"] = self._label_2_list(int(line_list[3]))\n data_set.append(sample)\n self.logger.info(\"DataSet size {} sample\".format(len(data_set)))\n\n return data_set\n\n def _load_test_dataset(self, data_path):\n \"\"\"\n Loads the dataset\n Args:\n data_path: the data file to load\n \"\"\"\n with open(data_path, \"r\") as fin:\n data_set = []\n for idx, line in enumerate(fin):\n line = unicode(line, encoding=\"utf8\")\n sample = {}\n line_list = str(line).strip().split(\"|\")\n if len(line_list) != 3:\n self.logger.warning(\"第{}行数据格式错误\".format(idx + 1))\n continue\n else:\n sample[\"id\"] = line_list[0]\n sample[\"document1\"] = [\n unicode(_, \"utf8\") for _ in line_list[1].split(\" \")\n ]\n sample[\"document1_character\"] = self._add_character(\n line_list[1].split(\" \")\n )\n sample[\"document2\"] = [\n unicode(_, \"utf8\") for _ in line_list[2].split(\" \")\n ]\n sample[\"document2_character\"] = self._add_character(\n line_list[2].split(\" \")\n )\n data_set.append(sample)\n self.logger.info(\"DataSet size {} sample\".format(len(data_set)))\n\n return data_set\n def _add_character(self, word_list):\n \"\"\"\n Add the characters\n Args:\n word_list: list of words\n Returns:\n list of characters\n \"\"\"\n character_list = []\n for 
word in word_list:\n character_list.append([character for character in unicode(word, \"utf8\")])\n return character_list\n\n def _shuffle_and_split_data_set(self, data_set):\n \"\"\"\n 打乱并且分割数据集\n \"\"\"\n data_size = len(data_set)\n indices = np.arange(data_size)\n np.random.shuffle(indices)\n index = int(data_size * (1 - self.args.dev))\n train_indices = indices[0:index]\n dev_indices = indices[index:-1]\n train_set = []\n dev_set = []\n for idx in train_indices:\n train_set.append(data_set[idx])\n for idx in dev_indices:\n dev_set.append(data_set[idx])\n return train_set, dev_set\n\n def get_mini_batchs(self, batch_size, set_name=\"train\", shuffle=False, predict=False):\n # self.train_set, self.dev_set = self._shuffle_and_split_data_set(self.data_sets)\n if set_name == \"train\":\n data_set = self.train_set\n elif set_name == \"dev\":\n data_set = self.dev_set\n elif set_name == 'test':\n data_set = self.test_sets\n else:\n raise NotImplementedError(\"No data set named as {}\".format(set_name))\n data_size = len(data_set)\n indices = np.arange(data_size)\n if shuffle:\n np.random.shuffle(indices)\n for batch_start in np.arange(0, data_size, batch_size):\n batch_indices = indices[batch_start : batch_start + batch_size]\n yield self._one_mini_batch(data_set, batch_indices, predict=predict)\n\n def _one_mini_batch(self, data, batch_indices, predict=False):\n \"\"\"\n Get one mini batch\n Args:\n data: all data\n batch_indices: the indices of the samples to be selected\n Returns:\n one batch of data\n \"\"\"\n if predict:\n batch_data = {\n \"raw_data\": [data[i] for i in batch_indices],\n \"document1_ids\": [],\n \"document2_ids\": [],\n \"document1_character_ids\": [],\n \"document2_character_ids\": [],\n \"id\": [],\n }\n else:\n batch_data = {\n \"raw_data\": [data[i] for i in batch_indices],\n \"document1_ids\": [],\n \"document2_ids\": [],\n \"document1_character_ids\": [],\n \"document2_character_ids\": [],\n \"label\": [],\n \"id\": [],\n }\n for data in batch_data[\"raw_data\"]:\n try:\n batch_data[\"document1_ids\"].append(data[\"document1_ids\"])\n batch_data[\"document2_ids\"].append(data[\"document2_ids\"])\n batch_data[\"document1_character_ids\"].append(\n data[\"document1_character_ids\"]\n )\n batch_data[\"document2_character_ids\"].append(\n data[\"document2_character_ids\"]\n )\n batch_data[\"id\"].append(data[\"id\"])\n if predict:\n continue\n batch_data[\"label\"].append(data[\"label\"])\n\n except KeyError:\n print(\" \")\n return batch_data\n\n def word_iter(self, set_name=None, character=False):\n \"\"\"\n Iterates over all the words in the dataset\n Args:\n set_name: if it is set, then the specific set will be used\n Returns:\n a generator\n \"\"\"\n if set_name is None:\n data_set = self.train_set + self.dev_set\n elif set_name == \"train\":\n data_set = self.train_set\n elif set_name == \"dev\":\n data_set = self.dev_set\n else:\n raise NotImplementedError(\"No data set named as {}\".format(set_name))\n if data_set is not None:\n for sample in data_set:\n if character:\n for token in sample[\"document1_character\"]:\n for character in token:\n yield character\n for token in sample[\"document2_character\"]:\n for character in token:\n yield character\n else:\n for token in sample[\"document1\"]:\n yield token\n for token in sample[\"document2\"]:\n yield token\n\n def convert_to_ids(self, vocab, character=False, set_name=None):\n \"\"\"\n Convert the question and passage in the original dataset to ids\n Args:\n vocab: the vocabulary on this dataset\n \"\"\"\n if 
set_name is None:\n data_sets = [self.train_set, self.dev_set]\n elif set_name == 'test':\n data_sets = [self.test_sets]\n\n for data_set in data_sets:\n if data_set is None:\n continue\n for sample in data_set:\n if character:\n sample[\"document1_character_ids\"] = vocab.convert_character_to_ids(\n sample[\"document1_character\"],\n self.args.max_document_len,\n self.args.max_word_len,\n )\n sample[\"document2_character_ids\"] = vocab.convert_character_to_ids(\n sample[\"document2_character\"],\n self.args.max_document_len,\n self.args.max_word_len,\n )\n else:\n sample[\"document1_ids\"] = vocab.convert_to_ids(\n sample[\"document1\"], self.args.max_document_len\n )\n sample[\"document2_ids\"] = vocab.convert_to_ids(\n sample[\"document2\"], self.args.max_document_len\n )\n\n def _label_2_list(self, label):\n label_list = [0 for _ in range(2)]\n label_list[label] = 1\n return label_list\n","sub_path":"submit/vec_feat_xgb_test/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"269277755","text":"import argparse\n\nimport torch\nimport webdataset as wds\nfrom torch.utils.data import DataLoader\n\nimport satflow.data.datasets\nfrom satflow.core.utils import load_config\nfrom satflow.data.datasets import get_dataset\n\n\ndef get_loaders(config):\n \"\"\"\n Get Dataloaders for train/test\n Args:\n config: Dict, configuration dictionary for the dataset\n\n Returns:\n Dict[Dataloader] containing the train and test dataloaders\n \"\"\"\n print(config)\n train_dset = wds.WebDataset(config[\"sources\"][\"train\"])\n val_dset = wds.WebDataset(config[\"sources\"][\"val\"])\n test_dset = wds.WebDataset(config[\"sources\"][\"test\"])\n train_dataset = get_dataset(config[\"name\"])([train_dset], config=config, train=True)\n val_dataset = get_dataset(config[\"name\"])([val_dset], config=config, train=False)\n test_dataset = get_dataset(config[\"name\"])([test_dset], config=config, train=False)\n\n train_dataloader = DataLoader(\n train_dataset,\n num_workers=config[\"num_workers\"],\n batch_size=config[\"batch_size\"],\n pin_memory=True,\n )\n val_dataloader = DataLoader(\n val_dataset,\n num_workers=config[\"num_workers\"],\n batch_size=config[\"batch_size\"],\n pin_memory=True,\n )\n test_dataloader = DataLoader(\n test_dataset,\n num_workers=config[\"num_workers\"],\n batch_size=config[\"batch_size\"],\n pin_memory=True,\n )\n\n return {\"train\": train_dataloader, \"val\": val_dataloader, \"test\": test_dataloader}\n\n\ndef setup_experiment(args):\n \"\"\"\n Sets up the basic logging, etc. common things for running experiments\n\n Args:\n args: Commandline arguments\n\n Returns:\n\n \"\"\"\n\n config = load_config(args.config)\n\n config[\"dataset\"][\"num_workers\"] = args.num_workers\n return config\n\n\ndef get_args():\n\n parser = argparse.ArgumentParser(description=\"SatFlow\")\n\n # cuda\n parser.add_argument(\n \"--with_cpu\",\n default=False,\n action=\"store_true\",\n help=\"use CPU in case there's no GPU support\",\n )\n\n # train\n parser.add_argument(\n \"-c\", \"--config\", default=\"./config.yaml\", type=str, help=\"Path to Config File\"\n )\n parser.add_argument(\n \"--local_rank\",\n type=int,\n default=-1,\n help=\"local rank passed from distributed launcher\",\n )\n\n parser.add_argument(\n \"-nw\",\n \"--num_workers\",\n type=int,\n default=1,\n help=\"Number of dataloader workers\",\n )\n\n # Include DeepSpeed configuration arguments\n # parser = deepspeed.add_config_arguments(parser)\n\n args = parser.parse_args()\n\n return args\n","sub_path":"satflow/core/training_utils.py","file_name":"training_utils.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"253525562","text":"from rest_framework import serializers\nfrom api.objects import CandidateObject\n\nfrom .models import *\n\nclass CandidateSerializer(serializers.Serializer):\n candidate = serializers.CharField(required = True)\n created_at = serializers.DateTimeField(required = True)\n sentiment = serializers.FloatField(required = True)\n text = serializers.CharField(required = True)\n user = serializers.CharField(required = True)\n tid = serializers.CharField(required = True)\n\n def restore_object(self, attrs, instance = None):\n if instance is not None:\n instance.candidate = attrs.get('candidate', instance.candidate)\n instance.created_at = attrs.get('created_at', instance.created_at)\n instance.sentiment = attrs.get('sentiment', instance.sentiment)\n instance.text = attrs.get('text', instance.text)\n instance.user = attrs.get('user', instance.user)\n instance.tid = attrs.get('tid', instance.tid)\n instance.anger = attrs.get('anger', instance.anger)\n instance.disgust = attrs.get('disgust', instance.disgust)\n instance.fear = attrs.get('fear', instance.fear)\n instance.joy = attrs.get('joy', instance.joy)\n instance.sadness = attrs.get('sadness', instance.sadness)\n instance.openness = attrs.get('openness', instance.openness)\n instance.conscientiousness = attrs.get('conscientiousness', instance.conscientiousness)\n instance.extraversion = attrs.get('extraversion', instance.extraversion)\n instance.agreeableness = attrs.get('agreeableness', instance.agreeableness)\n instance.range = attrs.get('range', instance.range)\n return CandidateObject(**attrs)\n\nclass AggregateSerializer(serializers.Serializer):\n candidate = serializers.CharField(required = True)\n datetime_block = serializers.IntegerField(required = True)\n count_pos_sentiment = serializers.IntegerField(required = False)\n count_neg_sentiment = serializers.IntegerField(required = False)\n avg_pos_sentiment = serializers.FloatField(required = False)\n avg_neg_sentiment = serializers.FloatField(required = False)\n\n def restore_object(self, attrs, instance = None):\n if instance is not None:\n instance.candidate = attrs.get('candidate', instance.candidate)\n instance.datetime_block = attrs.get('datetime_block', instance.datetime_block)\n instance.count_pos_sentiment = attrs.get('count_pos_sentiment', instance.count_pos_sentiment)\n instance.count_neg_sentiment = attrs.get('count_neg_sentiment', instance.count_neg_sentiment)\n instance.avg_pos_sentiment = attrs.get('avg_pos_sentiment', instance.avg_pos_sentiment)\n instance.avg_neg_sentiment = attrs.get('avg_neg_sentiment', instance.avg_neg_sentiment)\n return AggregateObject(**attrs)\n\nclass WordSerializer(serializers.Serializer):\n candidate = serializers.CharField(required = True)\n text = serializers.CharField(required = True)\n count = serializers.IntegerField(required = False)\n tf = serializers.FloatField(required = False)\n\n def restore_object(self, attrs, instance = None):\n if instance is not None:\n instance.candidate = attrs.get('candidate', instance.candidate)\n instance.text = attrs.get('text', instance.text)\n instance.count = attrs.get('count', instance.count)\n instance.tf = attrs.get('tf', instance.tf)\n return WordObject(**attrs)","sub_path":"django_api/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"199158701","text":"from selenium import webdriver\nimport time\nimport os\nfrom selenium.webdriver.chrome.options import Options\n\ndef main():\n chrome_options = webdriver.ChromeOptions()\n chrome_options.binary_location = os.environ.get(\"GOOGLE_CHROME_BIN\")\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_options.add_argument(\"--no-sandbox\")\n\n driver = webdriver.Chrome(executable_path='chromedriver', options=chrome_options)\n\n erros_seguidos = 0\n\n while(True):\n try:\n if erros_seguidos > 5:\n print()\n print(\"Muitos erros consecutivos, reniciando função ####################################################################\")\n print()\n return True, ''\n\n driver.get(\"https://www.google.com.br/covid19/mobility/index.html?hl=pt-BR\")\n time.sleep(2)\n link = driver.find_element_by_xpath('/html/body/div[1]/section[3]/div[2]/div/div[1]/p[3]/a[1]')\n link = link.get_attribute(\"href\")\n print('Operação concluida #####################################################################')\n break\n\n except Exception as e:\n erros_seguidos = erros_seguidos + 1\n print(e)\n\n driver.quit()\n return False, link\n\ndef crawler():\n continua = True\n while continua:\n print('Iniciando operação #####################################################################')\n try:\n continua, link = main()\n except Exception as e:\n print(e)\n continua = True\n return link","sub_path":"funcoes_auxiliares/crawler_mobility.py","file_name":"crawler_mobility.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"361589594","text":"import re\nimport logging\n\nfrom hailtop.aiocloud import aiogoogle\n\nfrom .config import BENCHMARK_RESULTS_PATH\n\n\nlog = logging.getLogger('benchmark')\n\nBENCHMARK_BUCKETS = ['hail-benchmarks', 'hail-benchmarks-2']\n\nFILE_PATH_REGEX = re.compile(r'gs://((?P[^/]+)/)(?P.*)')\n\n\ndef get_geometric_mean(prod_of_means, num_of_means):\n return prod_of_means ** (1.0 / num_of_means)\n\n\ndef round_if_defined(x):\n if x is not None:\n return round(x, 6)\n return None\n\n\ndef parse_file_path(regex, name):\n match = regex.fullmatch(name)\n return match.groupdict()\n\n\ndef enumerate_list_of_trials(list_of_trials):\n trial_indices = []\n wall_times = []\n within_group_idx = []\n for count, trial in enumerate(list_of_trials):\n wall_times.extend(trial)\n within_group_idx.extend([f'{j+1}' for j in range(len(trial))])\n temp = [count] * len(trial)\n trial_indices.extend(temp)\n res_dict = {'trial_indices': trial_indices, 'wall_times': wall_times, 'within_group_index': within_group_idx}\n return res_dict\n\n\nasync def list_benchmark_files(fs: aiogoogle.GoogleStorageAsyncFS):\n list_of_files = []\n for bucket in BENCHMARK_BUCKETS:\n files = await fs.listfiles(f'gs://{bucket}/', recursive=True)\n list_of_files.extend(files)\n return list_of_files\n\n\nasync def submit_test_batch(batch_client, sha):\n batch = batch_client.create_batch(attributes={'sha': sha})\n known_file_path = 'gs://hail-benchmarks-2/tpoterba/0.2.21-f6f337d1e9bb.json'\n dest_file_path = f'{BENCHMARK_RESULTS_PATH}/0-{sha}.json'\n job = batch.create_job(\n image='ubuntu:20.04',\n command=['/bin/bash', '-c', 'touch /io/test; sleep 5'],\n resources={'cpu': '0.25'},\n input_files=[(known_file_path, '/io/test')],\n output_files=[('/io/test', dest_file_path)],\n )\n await batch.submit(disable_progress_bar=True)\n log.info(f'submitting batch for commit {sha}')\n return job.batch_id\n","sub_path":"benchmark-service/benchmark/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"310940803","text":"from openerp.osv import fields, osv, orm\nfrom openerp.osv import fields, osv, orm\nfrom tempfile import NamedTemporaryFile\nfrom io import BytesIO\nimport os\nimport xlrd\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom openpyxl.cell.cell import Cell\nimport base64\nfrom datetime import date\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\n\nclass elote_direct_order_history_log_wizard(osv.osv_memory):\n _name = \"elote.direct.order.history.log.wizard\"\n _description = \"Load direct orders history log\"\n\n _columns = {\n 'wizard_id': fields.many2one('elote.direct.order.history.loader.wizard'),\n 'line': fields.integer('Line number'),\n 'message': fields.html('Message')\n }\n\nclass elote_direct_order_history_loader_wizard(osv.osv_memory):\n _name = \"elote.direct.order.history.loader.wizard\"\n _description = \"Load direct orders history from spreadsheet\"\n\n def _default_lot(self, cr, uid, ids, context=None):\n lot_obj = self.pool.get('elote.lote')\n open_lot_ids = lot_obj.search(cr, uid, [('state','!=','open'),('direct','=',True)])\n return open_lot_ids and open_lot_ids[0] or False\n\n _columns = {\n 'lot_id': fields.many2one('elote.lote', string='Lot', domain=\"[('state','=','open'),('direct','=',True)]\", required=True),\n 'file': fields.binary('Select file'),\n 'date': fields.date('Assign this date to orders', default=fields.date.today),\n 'products_loaded': fields.integer('Products loaded'),\n 'message_ids': fields.one2many('elote.direct.order.history.log.wizard', 'wizard_id', string='messages'),\n 'error_message': fields.html('Errors found'),\n 'state': fields.selection([('choose', 'choose'), # choose file\n ('file_uploaded', 'file_uploaded'), # File uploaded succesfully\n ('error', 'error')]) # Errors found uploading file\n }\n\n _defaults = {\n 'state': 'choose',\n 'lot_id': _default_lot,\n }\n\n def load_file(self, cr, uid, ids, context=None):\n def get_value(cell, type = 's'):\n if type == 's':\n value = ''\n if cell.value != '-':\n if cell:\n if cell.data_type == Cell.TYPE_STRING:\n value = cell.value.strip()\n elif cell.data_type == Cell.TYPE_NUMERIC:\n value = str(cell.value)\n value = value.strip()\n else:\n value = cell.value\n elif type == 'i':\n value = 0\n if cell:\n if cell.data_type == Cell.TYPE_STRING:\n try:\n value = int(cell.value)\n except ValueError:\n value = 0\n elif cell.data_type == Cell.TYPE_NUMERIC:\n value = cell.value\n else:\n value = 0\n if cell:\n if cell.data_type == Cell.TYPE_STRING:\n try:\n value = float(cell.value)\n except ValueError:\n value = 0\n elif cell.data_type == Cell.TYPE_NUMERIC:\n value = cell.value\n return value\n\n def get_string(cell):\n return get_value(cell)\n\n def get_int(cell):\n return get_value(cell, 'i')\n\n def get_float(cell):\n return get_value(cell, 'd')\n\n if context is None:\n context = {}\n this = self.browse(cr, uid, ids)[0]\n form_data = self.read(cr, uid, ids, ['lot_id','date','file','id'])[0]\n partner_obj = self.pool.get('res.partner')\n user_obj = self.pool.get('res.users')\n product_pool = self.pool.get('product.product')\n supplierinfo_obj = self.pool.get('product.supplierinfo')\n po_obj = self.pool.get('purchase.order')\n log_obj = self.pool.get('elote.direct.order.history.log.wizard')\n\n #lot_id = form_data['lot_id']\n lot_id = self.browse(cr, uid, ids).lot_id\n orders_date = self.browse(cr, uid, ids).date\n xlsfile = base64.decodestring(form_data['file'])\n rightLines = []\n errorLines = []\n lineNumber = 0\n 
state = 'file_uploaded'\n try:\n wb = load_workbook(filename=BytesIO(xlsfile))\n except:\n temp = NamedTemporaryFile(delete=False)\n temp.write(xlsfile)\n temp.close()\n book = xlrd.open_workbook(filename=temp.name)\n index = 0\n nrows, ncols = 0, 0\n while nrows * ncols == 0:\n sheet = book.sheet_by_index(index)\n nrows = sheet.nrows\n ncols = sheet.ncols\n index += 1\n\n # prepare a xlsx sheet\n wb = Workbook()\n sheet1 = wb.get_active_sheet()\n\n for row in xrange(0, nrows):\n for col in xrange(0, ncols):\n value = sheet.cell_value(row, col)\n if col == 3:\n if row > 0:\n value = float(value)\n else:\n if (type(value) is float):\n value = str(\"%.15g\" % value)\n sheet1.cell(row=row+1, column=col+1).value = value\n os.unlink(temp.name)\n\n if len(errorLines) == 0:\n # Storage for each purchase order for each partner\n vals = {}\n ws = wb.get_sheet_by_name(wb.get_sheet_names()[0])\n bs_name = ''\n pc_name = ''\n order_number = 0\n for row in ws.iter_rows():\n lineNumber += 1\n if row[0].value != None and not (row[0].data_type == Cell.TYPE_STRING and row[0].value.strip().lower() in ['cp', 'sb', '',]):\n row_info = []\n for cell in row:\n if cell.value:\n row_info.append(get_string(cell))\n else:\n row_info.append('')\n errorMessage = ''\n supplier_name = get_string(row[0])\n delivery_to = get_string(row[1])\n isbn = get_string(row[2])\n quantity = get_float(row[3])\n price = get_float(row[4])\n\n partner_id = (partner_obj.search(cr, uid, ['|',('name','=',supplier_name),('ref','=',supplier_name)])+\n [False])[0]\n bs_id = partner_obj.browse(cr, uid, (partner_obj.search(cr, uid, ['|',('name','=',delivery_to),('ref','=',delivery_to)])+\n [False])[0])\n user_id = False\n for child in bs_id.child_ids:\n for user in child.user_ids:\n user_id = user.id\n break\n\n product_id = False\n # Busqueda por producto\n product_id = 0\n product_tmpl_id = 0\n product_obj = product_pool.search(cr, uid, [('ean13', '=', isbn)])\n if product_obj:\n product_id = product_obj[0]\n product_tmpl_id = product_pool.browse(cr, uid, product_id).product_tmpl_id.id\n else:\n product_info = {\n 'ean13': isbn,\n 'default_code': isbn,\n 'isbn': isbn,\n 'name': isbn,\n 'list_price': price,\n 'route_ids': False,\n }\n try:\n product_id = product_pool.create(cr, uid, product_info, context=context)\n product_tmpl_id = product_pool.browse(cr, uid, product_id).product_tmpl_id.id\n except:\n errorMessage += ('' if errorMessage == '' else ', ') + _('Invalid EAN13: ') + product_info['ean13']\n\n quantity = int(quantity)\n if user_id != bs_name or partner_id != pc_name:\n bs_name = user_id\n pc_name = partner_id\n order_number += 1\n\n # Check values\n if not partner_id:\n errorMessage += ('' if errorMessage == '' else ', ') + _('Invalid supplier: ') + supplier_name\n if not user_id:\n errorMessage += ('' if errorMessage == '' else ', ') + _('User not found for ') + delivery_to\n if not product_id:\n errorMessage += ('' if errorMessage == '' else ', ') + _('Invalid product: ') + isbn\n if not quantity:\n errorMessage += ('' if errorMessage == '' else ', ') + _('Invalid quantity')\n\n # Create purchase order line\n if errorMessage == '':\n line_vals = {\n 'product_id': product_id,\n 'name': isbn,\n 'date_planned': orders_date,\n 'boxes': quantity,\n 'price_unit': price,\n 'product_qty': quantity,\n }\n rightLines.append(\n {\n 'order_number': order_number,\n 'partner_id': partner_id,\n 'bs_id': bs_id,\n 'user_id': user_id,\n 'line_vals': line_vals\n }\n )\n else:\n errorLines.append('' + str(lineNumber) + ': ' + errorMessage)\n 
log_obj.create(cr, uid, {'wizard_id': ids[0], 'line': lineNumber, 'message': errorMessage}, context)\n        if len(errorLines) > 0:\n            errorMessage = 'Errors found in the following spreadsheet row'\n            if len(errorLines) > 1:\n                errorMessage += 's'\n            errorMessage += ':<br/>'\n            for line in errorLines:\n                errorMessage += line + "<br/>
\"\n state = 'error'\n else:\n for insertLine in rightLines:\n order_number = insertLine['order_number']\n partner_id = insertLine['partner_id']\n bs_id = insertLine['bs_id']\n user_id = insertLine['user_id']\n line_vals = insertLine['line_vals']\n\n # Build values for purchase order for each partner.\n if order_number in vals:\n # Exists purchase order, add product.\n vals[order_number]['order_line'].append((0,0,line_vals))\n else:\n # Not exists purchase order, create one.\n vals[order_number] = {\n 'direct': True,\n 'partner_id': partner_id,\n 'sb_origin': bs_id.id,\n 'responsible_id': user_id,\n 'invoice_method': 'manual',\n 'date_order': orders_date,\n 'pricelist_id': 1,\n 'location_id': 1,\n 'lote_id': lot_id.id,\n 'state': 'received',\n 'order_line': [(0,0,line_vals)]\n }\n # Create PO in database.\n context.update({'direct': True})\n po_ids = [ po_obj.create(cr, uid, po, context) for po in vals.values() ]\n self.write(cr, uid, ids, {\n 'state': state,\n 'error_message': errorMessage\n }, context=context)\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': self._name,\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'target': 'new',\n }\n\n","sub_path":"elote_purchase_order_loader/wizard/elote_direct_order_history_loader_wizard.py","file_name":"elote_direct_order_history_loader_wizard.py","file_ext":"py","file_size_in_byte":12515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"409115229","text":"'''6.Write a program which accept number from user and check whether that number is positive or\r\nnegative or zero.'''\r\n\r\ndef ChkNumber(num):\r\n if num == 0:\r\n print(\"You entered zero\");\r\n elif num<0:\r\n print(\"You entered a negative number\");\r\n else:\r\n print(\"You entered a positive number\");\r\n\r\ndef main():\r\n num = input(\"Enter a number: \");\r\n ChkNumber(num);\r\n \r\nif __name__ == \"__main__\":\r\n main();\r\n","sub_path":"Assignments/Assignment1/Assignment1_3.py","file_name":"Assignment1_3.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"5381431","text":"\r\n\r\n\r\n#Final Project\r\n\r\ncomp= {\"Which city is known as the city of love? \":'Paris',\"One of the 7 wonders of the world (in İndia)? \":'Tac Mahal',\r\n \"Year of French revolution? \":'1789',\"Who painted the Guernica painting? \":'Picasso',\r\n \"Capital city of Turkey? \": 'Ankara',\"What is the surname president of France? \":'Macron',\r\n \"Which organ pumps blood?\":'Heart', \"what is the first letter of alphabet \":'A', \r\n \"what is the capital city of Malaysia \": 'Kuala Lumpur',\"Most populated city of Turkey \":'Istanbul'}\r\n\r\npoints=0\r\n\r\nfor question in comp.keys():\r\n print(question)\r\n answer=input(\"Answer: \")\r\n if answer in comp[question]:\r\n points+=10\r\n \r\n\r\nprint(points)\r\n\r\n\r\nif points<=50:\r\n print(\"You lost!\")\r\n \r\nelse:\r\n print(\"You won!!\")\r\n\r\n\r\n","sub_path":"Final Project/Final project (2).py","file_name":"Final project (2).py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"430187830","text":"import collections \nfrom collections import Counter\nimport pandas as pd\nimport numpy as np\n\ndef main():\n myfile = \"thermalizedsicnt-end-298-0-min.SiCNT\"\n line_where_Bonds_start = 5298 #end of the atoms section - 1 (as read in eclipse editor)\n with open('new_data_file.data', 'w') as outfile, open(myfile, 'r') as infile, \\\n open('data-file-sicnt-water-0.6.data', 'r') as firstdatafile:\n atom_list_new = positions_list(infile)\n \n #lines = f.readlines()\n #lines = [l for l in lines if \"ROW\" in l]\n #outfile.writelines(lines)\n \n #get the header of the first data file\n for j, linei in enumerate(firstdatafile):\n if j < 25: #where header end, this one is not gonna change\n outfile.write(linei)\n firstdatafile.seek(0)\n \n \n #print the new coordinates:\n for i in atom_list_new:\n print(*i, file = outfile)\n \n \n #get the bonds and the angles\n for j, linei in enumerate(firstdatafile):\n if j > line_where_Bonds_start:\n outfile.write(linei)\n \n \n \n \n \ndef get_header_from_first_data_file():\n pass\n \ndef positions_list(file_tx): #return ordered atoms \n dict_atoms = {}\n coords = []\n for skip in range(9):\n next(file_tx)\n for rows in file_tx:\n values = rows.split()\n keys = int(values[0])\n atom_type = int(values[2])\n if atom_type == 1: #Si\n q = 0.6\n mol_id = 1\n elif atom_type == 2: #C\n q = -0.6\n mol_id = 1\n \n elif atom_type == 3: #H\n q = 0.5897\n mol_id = 2\n \n elif atom_type == 4: #O\n q = -1.1794\n mol_id = 2\n \n coords = [mol_id, int(values[2]), q, float(values[3]), float(values[4]), float(values[5])]\n \n \n dict_atoms[keys] = coords \n positions = sorted(dict_atoms.items()) \n file_tx.seek(0)\n \n #convert to a list:\n final_list = []\n for i in positions:\n ncoords = [i[0], i[1][0], i[1][1], i[1][2], i[1][3], i[1][4], i[1][5]]\n final_list.append(ncoords)\n \n \n return final_list\n \nif __name__ == \"__main__\": main()\n","sub_path":"jose/in/0-dump-to-data-file.py","file_name":"0-dump-to-data-file.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"294923320","text":"import os,sys\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport glob\nfrom subprocess import check_call, check_output, PIPE, Popen, getoutput, CalledProcessError\n\ndef grab_nearest_tss_from_peak(macs_peaks, genome_tss, outdir):\n # Grab nearest tss from peak\n outfile = os.path.join(outdir, os.path.basename(macs_peaks) + \".candidateRegions.bed\")\n files = pd.read_csv(outfile, sep=\"\\t\")\n annotated_peaks = os.path.join(outdir, os.path.basename(macs_peaks) + \".annotated_peaks.bed\")\n sort_command = \"sort -k1,1 -k2,2n {genome_tss} > {genome_tss}.sorted\"\n sort_command = sort_command.format(**locals())\n p = Popen(sort_command, stdout=PIPE, stderr=PIPE, shell=True)\n print(\"Running:\" + sort_command)\n (stdoutdata, stderrdata) = p.communicate()\n err = str(stderrdata, 'utf-8')\n\n command = \"bedtools closest -a {outfile} -b {genome_tss}.sorted -d > {annotated_peaks}\"\n command = command.format(**locals())\n p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)\n print(\"Running:\" + command)\n (stdoutdata, stderrdata) = p.communicate()\n err = str(stderrdata, 'utf-8')\n\n return stdoutdata\n\n# Generates QC Prediction Metrics:\ndef GrabQCMetrics(prediction_df, outdir):\n GeneCounts = prediction_df.groupby(['TargetGene']).size()\n GeneCounts.to_csv(os.path.join(outdir,\"EnhancerPerGene.txt\"), sep=\"\\t\")\n\n # Grab Number of Enhancers Per Gene\n GeneMedian = prediction_df.groupby(['TargetGene']).size().median()\n GeneMean = prediction_df.groupby(['TargetGene']).size().mean()\n GeneStdev = prediction_df.groupby(['TargetGene']).size().std()\n\n \n #Grab Number of genes per enhancers\n num_enhancers = prediction_df[['chr', 'start', 'end']].groupby(['chr', 'start', 'end']).size()\n num_enhancers.to_csv(os.path.join(outdir,\"GenesPerEnhancer.txt\"), sep=\"\\t\")\n mean_genes_per_enhancer = prediction_df[['chr', 'start', 'end']].groupby(['chr', 'start', 'end']).size().mean()\n stdev_genes_per_enhancer = prediction_df[['chr', 'start', 'end']].groupby(['chr', 'start', 'end']).size().std()\n median_genes_per_enhancer = prediction_df[['chr', 'start', 'end']].groupby(['chr', 'start', 'end']).size().median()\n # Grab Number of Enhancer-Gene Pairs Per Chromsome\n enhancergeneperchrom = prediction_df.groupby(['chr']).size()\n enhancergeneperchrom.to_csv(os.path.join(outdir, \"EnhancerGenePairsPerChrom.txt\"), sep=\"\\t\")\n mean_enhancergeneperchrom = prediction_df.groupby(['chr']).size().mean()\n stdev_enhancergeneperchrom = prediction_df.groupby(['chr']).size().std()\n median_enhancergeneperchrom = prediction_df.groupby(['chr']).size().median()\n\n # Enhancer-Gene Distancee\n distance = np.array(prediction_df['distance'])\n thquantile = np.percentile(distance, 10)\n testthquantile = np.percentile(distance, 90)\n\n # Quantile Normalization Plots\n#3 title=\"_QuantileNorm\"\n # PlotQuantilePlot(EnhancerList, title, outdir)\n # Plot Distributions and save as png\n PlotDistribution(num_enhancers, \"NumberOfGenesPerEnhancer\", outdir)\n PlotDistribution(GeneCounts, \"NumberOfEnhancersPerGene\", outdir)\n PlotDistribution(enhancergeneperchrom, \"EnhancersPerChromosome\", outdir)\n PlotDistribution(distance, \"EnhancerGeneDistance\", outdir)\n\n with open(os.path.join(outdir,\"QCSummary.txt\"), \"w\") as f:\n f.write(\"Enhancer Per Gene:\")\n f.write(str(GeneMedian))\n f.write(\"\\t\")\n f.write(str(GeneMean))\n f.write(\"\\t\")\n f.write(str(GeneStdev))\n f.write(\"\\n\")\n f.write(\"Genes Per Enhancer:\")\n 
f.write(str(median_genes_per_enhancer))\n f.write(\"\\t\")\n f.write(str(mean_genes_per_enhancer))\n f.write(\"\\t\")\n f.write(str(stdev_genes_per_enhancer))\n f.write(\"\\n\")\n f.write(\"E-G distance:\")\n f.write(str(np.median(distance)))\n f.write(\"\\t\")\n f.write(str(np.mean(distance)))\n f.write(\"\\t\")\n f.write(str(np.std(distance)))\n f.write(\"\\n\")\n f.write(\"Number of Enhancers/Chrom:\")\n f.write(str(median_enhancergeneperchrom))\n f.write(\"\\t\")\n f.write(str(mean_enhancergeneperchrom))\n f.write(\"\\t\")\n f.write(str(stdev_enhancergeneperchrom))\n f.write(\"\\n\")\n f.write(\"E-G 10th quantile:\")\n f.write(str(thquantile))\n f.write(\"\\n\")\n f.write(\"E-G 90th quantile:\")\n f.write(str(testthquantile))\n f.close()\n\ndef PlotQuantilePlot(EnhancerList, title, outdir):\n i='DHS'\n ax = sns.scatterplot('DHS.RPM', 'DHS.RPM.quantile', data=EnhancerList)\n ax.set_title(title)\n ax.set_ylabel('RPM.quantile')\n ax.set_xlabel('RPM')\n fig = ax.get_figure()\n outfile = os.path.join(outdir, i+str(title)+\".pdf\")\n fig.savefig(outfile, format='pdf')\n \n i=\"H3K27ac\"\n ax = sns.scatterplot('H3K27ac.RPM', 'H3K27ac.RPM.quantile', data=EnhancerList)\n ax.set_title(title)\n ax.set_ylabel('RPM.quantile')\n ax.set_xlabel('RPM')\n fig = ax.get_figure()\n outfile = os.path.join(outdir, i+str(title)+\".pdf\")\n fig.savefig(outfile, format='pdf')\n\n\n \n\ndef NeighborhoodFileQC(neighborhood_dir, outdir):\n x = glob.glob(os.path.join(neighborhood_dir, \"Enhancers.DHS.*\"))\n y = glob.glob(os.path.join(neighborhood_dir, \"Genes.TSS1kb.DHS.*\"))\n z = glob.glob(os.path.join(neighborhood_dir, \"Genes.DHS.*\"))\n \n data = pd.read_csv(x[0],sep=\"\\t\", header=None)\n data1 = pd.read_csv(y[0],sep=\"\\t\", header=None)\n data2 = pd.read_csv(z[0],sep=\"\\t\", header=None)\n\n counts = data.iloc[:, 3].sum()\n counts2 = data1.iloc[:,3].sum()\n counts3 = data2.iloc[:,3].sum()\n with open(os.path.join(outdir, \"PeakFileQCSummary.txt\"), \"a\") as f:\n f.write(\"Counts in Enhancers/GenesTSS/Genes:\")\n f.write(\"\\t\")\n f.write(str(counts))\n f.write(\"\\t\")\n f.write(str(counts2))\n f.write(\"\\t\")\n f.write(str(counts3))\n f.close()\n\n# Generates peak file metrics\ndef PeakFileQC(macs_peaks, outdir):\n if macs_peaks.endswith(\".gz\"):\n peaks = pd.read_csv(macs_peaks, compression=\"gzip\", sep=\"\\t\", header=None)\n else:\n peaks = pd.read_csv(macs_peaks, sep=\"\\t\", header=None)\n outfile = os.path.join(outdir, os.path.basename(macs_peaks) + \".candidateRegions.bed\")\n candidateRegions = pd.read_csv(outfile, sep=\"\\t\", header=None)\n candidateRegions['dist'] = candidateRegions[2] - candidateRegions[1]\n candreg = list(candidateRegions['dist'])\n PlotDistribution(candreg, 'WidthOfCandidateRegions', outdir)\n\n annotatedFile = os.path.join(outdir, os.path.basename(macs_peaks) + \".annotated_peaks.bed\")\n annotatedPeaks = pd.read_csv(annotatedFile, sep=\"\\t\", header=None)\n median = annotatedPeaks.iloc[:, 9].median()\n mean = annotatedPeaks.iloc[:, 9].mean()\n stdev = np.std(np.array(annotatedPeaks.iloc[:, 9]))\n PlotDistribution(np.array(annotatedPeaks.iloc[:,9]), \"DistanceOfPeakToClosestTSS\", outdir)\n\n peaks['dist'] = peaks[2]-peaks[1]\n peaks_array = list(peaks['dist'])\n PlotDistribution(peaks_array, \"WidthOfPeaks\", outdir)\n\n with open(os.path.join(outdir, \"PeakFileQCSummary.txt\"),\"w\") as f:\n f.write(str(macs_peaks))\n f.write(\"\\n\")\n f.write(\"Number of peaks:\")\n f.write(str(len(peaks['dist'])))\n f.write(\"\\n\")\n f.write(\"Median width of peak:\")\n 
f.write(str((peaks['dist'].median())))\n f.write(\"\\t\")\n f.write(str(peaks['dist'].mean()))\n f.write(\"\\t\")\n f.write(str(peaks['dist'].std()))\n f.write(\"\\n\")\n f.write(\"Median Distance of Peak to Closest TSS:\")\n f.write(str(median))\n f.write(\"\\t\")\n f.write(str(mean))\n f.write(\"\\t\")\n f.write(str(stdev))\n f.write(\"\\n\")\n f.write(\"Number of Candidate Regions:\")\n f.write(str(len(candidateRegions['dist'])))\n f.write(\"\\n\")\n f.write(\"Median width of Candidate Regions:\")\n f.write(str((peaks['dist'].median())))\n f.write(\"\\t\")\n f.write(str(peaks['dist'].mean()))\n f.write(\"\\t\")\n f.write(str(peaks['dist'].std()))\n f.write(\"\\n\")\n f.close()\n\n# Plots and saves a distribution as *.png\ndef PlotDistribution(array, title, outdir):\n ax = sns.distplot(array)\n ax.set_title(title)\n ax.set_ylabel('Estimated PDF of distribution')\n ax.set_xlabel('Counts')\n fig = ax.get_figure()\n outfile = os.path.join(outdir, str(title)+\".pdf\")\n fig.savefig(outfile, format='pdf')\n","sub_path":"diffpval/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"313382356","text":"'''\nSetup file for sophon\n'''\nfrom __future__ import print_function\nimport os\nimport shutil\nfrom distutils.core import setup, Extension\nfrom setuptools import find_packages\n\n# check sail pylib status\n# AARCH64_PATH = '../../build/lib/sail.cpython-35m-x86_64-linux-gnu.so'\nAARCH64_PATH = '../../../build/lib/sail.so'\nDST_PATH = './sophon'\n\nfilehandle = open(\"../../../git_version\",\"r\");\ngit_version = filehandle.readline();\nprint(git_version);\n\nif os.path.exists(\"./dist\"):\n os.system(\"rm -f ./dist/*\")\n\nif os.path.exists(DST_PATH):\n objs = os.listdir(DST_PATH)\n for obj in objs:\n if obj[-3:] == \".so\":\n print(\"remove file: {}\".format(obj))\n os.remove(os.path.join(DST_PATH,obj))\n\nif os.path.exists(AARCH64_PATH):\n try:\n shutil.copy(AARCH64_PATH, DST_PATH)\n except shutil.SameFileError:\n pass\n\n pyi_name = \"sophon/sail.pyi\"\n shutil.copy(\"../../../src/sail.pyi\",pyi_name)\n\n # sophon_aarch64 python module\n PACKAGES_AARCH64 = ['sophon', 'sophon.auto_runner', 'sophon.auto_runner.common',\n 'sophon.auto_runner.runner', 'sophon.auto_runner.external',\n 'sophon.utils', 'sophon.algokit',\n 'sophon.algokit.algo_cv', 'sophon.algokit.algo_cv.cls',\n 'sophon.algokit.algo_cv.det', 'sophon.algokit.algo_cv.seg',\n 'sophon.algokit.algo_nlp', 'sophon.algokit.algo_speech',\n 'sophon.algokit.algofactory',\n 'sophon.algokit.engine', 'sophon.algokit.libs',\n 'sophon.algokit.libs.extend_layer', 'sophon.algokit.utils']\n setup(name='sophon_arm',\n version=git_version,\n description='Inference samples for deep learning on Sophon products.',\n author='Sophon algorithm team',\n url='https://github.com/sophon-ai-algo/sophon-inference',\n long_description='''\n Guide to deploying deep-learning inference networks and deep vision primitives on Sophon TPU.\n ''',\n packages=PACKAGES_AARCH64,\n data_files = [pyi_name],\n include_package_data=True)\nelse:\n raise FileNotFoundError(\"sail lib not found\")\n","sub_path":"python/soc/arm/setup_arm.py","file_name":"setup_arm.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"561086441","text":"import csv\nimport json\nimport sys\nimport os\nimport math\nimport numpy\nimport datetime\nimport random\nimport pandas as pd\nimport yaml\nimport matplotlib.pyplot as plt\nfrom pyutilib.misc import Options\nfrom pyutilib.misc.timing import TicTocTimer\nfrom pyomo.environ import value\n\nfrom .util import factorial_iterator\nfrom .util import ToStr_JSONEncoder\nfrom . import formulations as frms\n\n\n#\n# Load data from a CSV file generated by 'epiinf collect'\n#\ndef load_csv_data(input_csv):\n df = pd.read_csv(input_csv, parse_dates=['Date'])\n df = df.set_index('Date')\n\n metadata = {}\n metafile = input_csv[:-4]+\"_meta.yml\"\n if os.path.isfile(metafile):\n with open(metafile, 'r') as INPUT:\n try:\n metadata = yaml.safe_load(INPUT)\n except yaml.YAMLError as exc: # pragma: no cover\n print(\"ERROR: problem parsing YAML file \"+metafile)\n print(exc)\n sys.exit(1)\n\n return df, metadata\n\n#\n# Load geodata from a CSV file\n#\n\"\"\"\nTODO - delete\n\ndef load_geodata(geodata_csv):\n if not os.path.exists(geodata_csv): # pragma: no cover\n print(\"ERROR: missing file \"+geodata_csv)\n sys.exit(1)\n return pd.read_csv(geodata_csv, index_col='geoid')\n\"\"\"\n\n#\n# Process the arguments used for inference\n#\ndef process_config(cfg):\n config = Options()\n\n config.formulation = cfg['formulation']\n config.ntrials = cfg.get('ntrials', None)\n config.input_csv = cfg['input_csv']\n #config.output_json = cfg.get('output_json',None)\n config.population = cfg.get('population', None)\n config.filter_counties_by_cases = cfg.get('filter_counties_by_cases', 0)\n config.county = cfg.get('county', None)\n config.column = cfg.get('column', None)\n config.reporting_factor = cfg.get('reporting_factor', 1.0)\n config.deltaP = cfg.get('deltaP', 7)\n config.sigma = cfg.get('sigma', None)\n config.gamma = cfg.get('gamma', None)\n config.mobility_json = cfg.get('mobility_json', None)\n config.factor_levels = cfg.get('factor_levels', None)\n config.bootstrap = cfg.get('bootstrap', Options())\n config.analysis_window = cfg.get('analysis_window', Options())\n\n # TODO - deprecate the use of the geodata CSV file option\n if 'population_csv' not in cfg:\n config.population_csvfile = cfg.get('geodata_csv', config.input_csv[:-4] + \"_geodata.csv\")\n config.population_csvcolumn = 'pop2010'\n config.population_csvindex = 'geoid'\n else:\n config.population_csvfile = cfg['population_csv']['file']\n config.population_csvcolumn = cfg['population_csv']['population']\n config.population_csvindex = cfg['population_csv']['index']\n\n return config\n\ndef check_config(config):\n if config.county is not None and type(config.county) is not str: # pragma: no cover\n print(\"ERROR: county id must be specified as a string\")\n sys.exit(1)\n try:\n assert(os.path.exists(config.input_csv))\n except: # pragma: no cover\n print(\"ERROR: input file \"+config.input_csv+\" does not exist\")\n raise\n assert type(config.reporting_factor) is float\n \ndef run_single_node_from_config(df, population_df, CONFIG, verbose):\n column = CONFIG.column\n ntrials = CONFIG.get('ntrials', df.columns)\n formulation = CONFIG.formulation\n filter_counties_by_cases = CONFIG.filter_counties_by_cases\n population_config = CONFIG.population\n population_csvcolumn = CONFIG.population_csvcolumn\n sigma = CONFIG.sigma\n gamma = CONFIG.gamma\n report_delay = CONFIG.deltaP\n reporting_factor = CONFIG.reporting_factor\n analysis_window = CONFIG.analysis_window\n\n all_results = list()\n ndx = 0\n\n for t in df:\n if column 
is not None and t != column:\n continue\n ndx = ndx+1\n if not ntrials is None and ndx > ntrials:\n break\n\n if population_config is None:\n if t not in population_df[population_csvcolumn]: # pragma: no cover\n print(\"WARNING: county \"+str(t)+\" does not have population data available.\")\n continue\n population = population_df[population_csvcolumn][t]\n else:\n population = population_config\n cm_rep_cases = df[t].to_list()\n Cdates = df.index.to_list()\n\n #if df[t][-1] == 0:\n # results = {'est_beta':None, 'status':'skipped', 'msg':'No case data', 'population': population, 'total_cases': float(df[t][-1])}\n\n if df[t][-1] <= filter_counties_by_cases:\n results = {'est_beta':None, 'status':'skipped', 'msg':'cumulative cases <= {} (filter_counties_by_cases)'.format(filter_counties_by_cases), 'population': population, 'total_cases': float(df[t][-1])}\n\n elif formulation == 'decay-lsq':\n results = frms.run_decay_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=Cdates)\n elif formulation == 'decay-blike':\n results = frms.run_decay_blike(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=Cdates)\n \"\"\"\n elif formulation == 'decay-multibeta-lsq':\n results = frms.run_decay_multibeta_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n deltaP=report_delay,\n reporting_factor=reporting_factor)\n # analysis_window=analysis_window)\n \"\"\"\n else: # pragma: no cover\n print(\"ERROR: unknown formulation '%s'\" % formulation)\n sys.exit(1)\n\n results['FIPS'] = t\n #\n # Collect results in a list\n #\n all_results.append( results )\n return all_results\n\ndef run_multinode_from_config(df, population_df, CONFIG, verbose):\n formulation = CONFIG.formulation\n sigma = CONFIG.sigma\n gamma = CONFIG.gamma\n filter_counties_by_cases = CONFIG.filter_counties_by_cases\n report_delay = CONFIG.deltaP\n reporting_factor = CONFIG.reporting_factor\n analysis_window = CONFIG.analysis_window\n mobility_dict = CONFIG.mobility_dict\n bootstrap = CONFIG.bootstrap\n bootstrap_percentile = bootstrap.get('percentile',5)\n bootstrap_n = bootstrap.get('n',100)\n bootstrap_seed = bootstrap.get('seed',None)\n bootstrap_value = bootstrap.get('value','est_beta')\n bootstrap_output_csv = bootstrap.get('output_csv', None)\n bootstrap_weighted = bootstrap.get('weighted', False)\n\n #\n # Error checking\n #\n nodes = [val for val in df.keys().to_list()]\n flag=False\n active_nodes = []\n for n in nodes:\n if not n in population_df: # pragma: no cover\n flag=True\n print(\"Population is missing for county: \"+str(n))\n if df[n][-1] > filter_counties_by_cases:\n active_nodes.append(n)\n else:\n print(\"WARNING: Skipping county '\"+str(n)+\"' in multinode estimation because it has no cases\")\n if flag: # pragma: no cover\n sys.exit(1)\n if len(active_nodes) == 0:\n return {'fraction_of_counties_with_cases': 0, 'est_beta':None}\n\n if bootstrap:\n if bootstrap_weighted:\n bootstrap_weights = population_df[active_nodes].copy()\n else:\n bootstrap_weights = population_df[active_nodes].copy()\n bootstrap_weights[bootstrap_weights.index] = 1\n\n #testing_bootstrap = population_df[active_nodes].copy()\n #testing_bootstrap[testing_bootstrap.index] = 0\n\n if bootstrap_seed is not None:\n 
random.seed(bootstrap_seed)\n\n all_results = []\n for i in range(bootstrap_n):\n DF = df.sample(n=len(df.keys()), replace=True, axis=1, random_state=random.randint(1000000,9999999), weights=bootstrap_weights)\n sampled_nodes = [val for val in DF.keys().to_list()]\n\n #for n in sampled_nodes:\n # testing_bootstrap[n] = testing_bootstrap[n] + 1\n populations = population_df[sampled_nodes]\n\n if formulation == 'multinode-decay-lsq':\n results = frms.run_multinode_decay_lsq(DF,\n populations=populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=reporting_factor,\n Cdates=df.index.tolist())\n elif formulation == 'multinode-decay-blike':\n results = frms.run_multinode_decay_blike(DF,\n population=populations,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n analysis_window=dict(),\n reporting_factor=reporting_factor,\n Cdates=df.index.tolist())\n# elif formulation == 'multinode-decay-multibeta-lsq':\n# results = frms.run_multinode_decay_multibeta_lsq(DF,\n# population=populations,\n# sigma=sigma,\n# gamma=gamma,\n# deltaP=report_delay,\n# reporting_factor=reporting_factor)\n else:\n raise RuntimeError(\"Unknown formulation: \"+formulation)\n all_results.append(results)\n\n #print(testing_bootstrap/ (len(testing_bootstrap)*bootstrap_n))\n #print(bootstrap_weights/ sum(bootstrap_weights))\n #\n # Do the estimate with all the data\n #\n\n if formulation == 'multinode-decay-lsq':\n results = frms.run_multinode_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=dict(),\n Cdates=df.index.tolist())\n elif formulation == 'multinode-decay-blike':\n results = frms.run_multinode_decay_blike(df[active_nodes],\n population=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n deltaP=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=dict(),\n Cdates=df.index.tolist())\n elif formulation == 'multinode-mobility-decay-lsq':\n results = frms.run_multinode_mobility_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n mobility=mobility_dict,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=df.index.tolist())\n elif formulation == 'multinode-mobility-window-decay-lsq':\n results = frms.run_multinode_mobility_window_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n mobility=mobility_dict,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=df.index.tolist())\n elif formulation == 'multinode-mobility-time-varying-decay-lsq':\n results = frms.run_multinode_mobility_time_varying_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n mobility=mobility_dict,\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=df.index.tolist())\n# elif formulation == 'multinode-decay-multibeta-lsq':\n# results = frms.run_multinode_decay_multibeta_lsq(df[active_nodes],\n# population=population_df[active_nodes],\n# sigma=sigma,\n# gamma=gamma,\n# deltaP=report_delay,\n# reporting_factor=reporting_factor)\n else:\n raise RuntimeError(\"Unknown formulation: \"+formulation)\n\n\n #\n # Compute the confidence interval\n #\n if bootstrap:\n values = [r[bootstrap_value] for r in 
all_results]\n values.sort()\n results['bootstrap_mean_beta'] = numpy.mean(values)\n #results['bootstrap_mean_beta'] = statistics.mean(values)\n quantiles = [ \n numpy.quantile(values, bootstrap_percentile/100, axis=0),\n numpy.quantile(values, 1.0-bootstrap_percentile/100, axis=0)\n ]\n #quantiles = statistics.quantiles(values, n=100//bootstrap_percentile)\n results['bootstrap_'+str(bootstrap_percentile)+'%'] = quantiles[0]\n results['bootstrap_'+str(100-bootstrap_percentile)+'%'] = quantiles[-1]\n if verbose:\n print(\"Bootstrap Value\")\n print(values)\n print(\"Quantiles\")\n print(quantiles)\n if bootstrap_output_csv is not None:\n bootstrap_df = pd.DataFrame(values, columns=[\"est_beta\"])\n bootstrap_df.to_csv(bootstrap_output_csv, quoting=csv.QUOTE_NONNUMERIC)\n\n assert(len(nodes) == len(df.keys()))\n results['num_counties'] = len(df.keys())\n results['fraction_of_counties_with_cases'] = len(active_nodes)/len(df.keys())\n return [results]\n\ndef run_multibeta_from_config(df, population_df, CONFIG, verbose):\n formulation = CONFIG.formulation\n sigma = CONFIG.sigma\n gamma = CONFIG.gamma\n filter_counties_by_cases = CONFIG.filter_counties_by_cases\n \n report_delay = CONFIG.deltaP\n reporting_factor = CONFIG.reporting_factor\n analysis_window = CONFIG.analysis_window\n\n #\n # Error checking\n #\n nodes = [val for val in df.keys().to_list()]\n flag=False\n active_nodes = []\n for n in nodes:\n if not n in population_df: # pragma: no cover\n flag=True\n print(\"Population is missing for county: \"+str(n))\n if df[n][-1] > filter_counties_by_cases:\n active_nodes.append(n)\n else:\n print(\"WARNING: Skipping county '\"+str(n)+\"' in multinode estimation because it has no cases\")\n if flag: # pragma: no cover\n sys.exit(1)\n if len(active_nodes) == 0:\n return {'fraction_of_counties_with_cases': 0, 'est_beta':{}, 'est_omega':{}}\n\n #\n # Do the estimate with all the data\n #\n if formulation == 'multibeta-singleomega-decay-lsq':\n results = frms.run_multibeta_singleomega_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=dict())\n elif formulation == 'multibeta-singleomegawin-decay-lsq':\n results = frms.run_multibeta_singleomegawin_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window)\n elif formulation == 'multibeta-singleomegawin-decay-l1':\n results = frms.run_multibeta_singleomegawin_decay_l1(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window)\n elif formulation == 'multibeta-multiwin-decay-lsq':\n results = frms.run_multibeta_multiwin_decay_lsq(df[active_nodes],\n populations=population_df[active_nodes],\n sigma=sigma,\n gamma=gamma,\n report_delay=report_delay,\n reporting_factor=reporting_factor,\n analysis_window=analysis_window,\n Cdates=df.index.tolist())\n else:\n raise RuntimeError(\"ERROR: Unknown model - \"+formulation)\n\n assert(len(nodes) == len(df.keys()))\n results['num_counties'] = len(df.keys())\n results['fraction_of_counties_with_cases'] = len(active_nodes)/len(df.keys())\n return [results]\n\n\n\"\"\"\nExample YAML file\ninference:\n - formulation: delay-ln-lsq\n deltaE: 5\n deltaI: 4\n deltaP: 7 # reporting delay (from time of infection to 
reported case)\n reportingfac: 1.0 # reporting factor (5 means actual cases = 5*reported)\n population: 4500000\n datafilename: data.csv\n daysbefore: 5 # number of days to include before the first case\n daysafter: 28 # number of days of data to include after the first case\n - formulation: delay-lsq\n sigma: 0.1923076923 # 1/5.2\n gamma: 0.25 # 1/4\n deltaP: 7\n population: 4500000\n datafilename: data.csv\n daysbefore: 5 # number of days to include before the first case\n daysafter: 28 # number of days of data to include after the first case\n\"\"\"\ndef run(args):\n with open(args.config_file, 'r') as fd:\n config = yaml.safe_load(fd)\n\n if 'inference' not in config:\n raise ValueError('No \"inference\" key found in the YAML config')\n\n timer = TicTocTimer()\n for cfg in config.get('inference', []):\n timer.tic('Starting Inference')\n verbose = cfg.get('verbose', args.verbose)\n factors = cfg.get('factors', None)\n output_csv = cfg.get('output_csv', None)\n output_json = cfg.get('output_json', None)\n\n assert output_csv is not None or output_json is not None \n assert type(verbose) is bool\n \n config = process_config(cfg)\n if verbose:\n print('Inference Configuration:\\n', config)\n\n all_results = []\n\n if factors is None:\n config_list = [config]\n else:\n config_list = factorial_iterator(factors, config)\n\n for CONFIG in config_list:\n try:\n population_df = pd.read_csv(CONFIG.population_csvfile, encoding=\"ISO-8859-1\", dtype={CONFIG.population_csvindex:'str'})\n population_df = population_df.set_index(CONFIG.population_csvindex)\n except:\n print(\"ERROR reading file \"+CONFIG.population_csvfile)\n raise\n check_config(CONFIG)\n if CONFIG.population is None and CONFIG.county is not None:\n CONFIG.population = population_df[CONFIG.population_csvcolumn][CONFIG.county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(CONFIG.county), str(CONFIG.population)))\n\n print(\"Input File: \"+CONFIG.input_csv+' with column '+str(CONFIG.column))\n #\n # Load the dataframe and experimental metadata (if it's available)\n #\n df, metadata = load_csv_data(CONFIG.input_csv)\n data = metadata.get('simulation parameters', None)\n if data is not None:\n if verbose:\n print('parameters used to create data')\n print(data)\n for key, value in data.items():\n CONFIG[key] = value\n\n #\n # load mobility data if needed\n #\n CONFIG.mobility_dict = {}\n if CONFIG.mobility_json is not None:\n try:\n with open(CONFIG.mobility_json, 'r') as fd:\n CONFIG.mobility_dict = json.load(fd)\n except:\n print(\"ERROR reading file \" + CONFIG.mobility_json)\n raise\n \n #\n # Execute inference\n #\n if CONFIG.formulation in ['decay-lsq', 'decay-blike', 'decay-multibeta-lsq']:\n results = run_single_node_from_config(df, population_df, CONFIG, verbose)\n elif CONFIG.formulation in ['multinode-mobility-time-varying-decay-lsq', 'multinode-mobility-window-decay-lsq', 'multinode-mobility-decay-lsq', 'multinode-decay-lsq', 'multinode-decay-blike', 'multinode-decay-multibeta-lsq']:\n results = run_multinode_from_config(df, population_df[CONFIG.population_csvcolumn], CONFIG, verbose)\n elif CONFIG.formulation.startswith('multibeta-'):\n results = run_multibeta_from_config(df, population_df[CONFIG.population_csvcolumn], CONFIG, verbose)\n else:\n raise ValueError('Invalid formulation', CONFIG.formulation, 'found in YAML file inference section.')\n #\n # Augment reported results\n #\n for trial in results:\n # guard against skipped counties, whose 'est_beta' is None\n if data is not None and trial.get('est_beta') is not None:\n trial['est_R0'] = trial['est_beta']/float(data['gamma'])\n for key, value in 
data.items():\n if not key in trial:\n trial[key] = value\n if CONFIG.factor_levels is not None:\n for key, value in CONFIG.factor_levels.items():\n if not key in trial:\n trial[key] = value\n all_results.append( trial )\n \n #\n # Save results\n #\n if output_csv:\n print(\"Writing results in file \"+output_csv)\n filedir = os.path.dirname(output_csv)\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n all_df = pd.DataFrame(all_results)\n all_df.to_csv(output_csv, index=False, quoting=csv.QUOTE_NONNUMERIC)\n else:\n print(\"Writing results in file \"+output_json)\n filedir = os.path.dirname(output_json)\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n with open(output_json, 'w') as OUTPUT:\n tmp = json.dumps(all_results, indent=4, cls=ToStr_JSONEncoder)\n OUTPUT.write(tmp)\n all_df = None\n #\n # Create a YAML file with metadata\n #\n metadata = {}\n metadata['timestamp'] = str(datetime.datetime.now())\n metadata['configuration'] = {}\n for key in cfg:\n metadata['configuration'][key] = cfg[key]\n if output_csv:\n metaoutput = output_csv[:-4]+\"_meta.yml\" \n elif output_json.endswith('.json'):\n metaoutput = output_json[:-5]+\"_meta.yml\" \n elif output_json.endswith('.jsn'):\n metaoutput = output_json[:-4]+\"_meta.yml\" \n print(\"Writing results metadata in file \"+metaoutput)\n with open(metaoutput, 'w') as OUTPUT:\n yaml.dump(metadata, OUTPUT)\n #\n # Print data\n #\n if verbose:\n if all_df is None:\n print(json.dumps(all_results, indent=4, cls=ToStr_JSONEncoder))\n else:\n pd.set_option('display.max_rows', 100000)\n print(all_df)\n\n timer.toc('Completed Inference')\n\n timer.tic('Completed All Inference Computations')\n\n\n\"\"\"\nThis is the previous version of 'run'. I'm caching this here so we can\nquickly go back to it.\n\nExample YAML file\ninference:\n - formulation: delay-ln-lsq\n deltaE: 5\n deltaI: 4\n deltaP: 7 # reporting delay (from time of infection to reported case)\n reportingfac: 1.0 # reporting factor (5 means actual cases = 5*reported)\n population: 4500000\n datafilename: data.csv\n daysbefore: 5 # number of days to include before the first case\n daysafter: 28 # number of days of data to include after the first case\n - formulation: delay-lsq\n sigma: 0.1923076923 # 1/5.2\n gamma: 0.25 # 1/4\n deltaP: 7\n population: 4500000\n datafilename: data.csv\n daysbefore: 5 # number of days to include before the first case\n daysafter: 28 # number of days of data to include after the first case\n\ndef run_old(args):\n with open(args.config_file, 'r') as fd:\n config = yaml.safe_load(fd)\n\n if 'inference' and 'batch_inference' not in config:\n raise ValueError('No inference or batch_inference key found in the YAML config')\n\n for cfg in config.get('inference', []):\n all_results = list()\n verbose = cfg.get('verbose', args.verbose)\n assert type(verbose) is bool\n \n if verbose:\n print('Inference Configuration:', cfg)\n \n formulation = cfg['formulation']\n ntrials = cfg.get('ntrials', None)\n\n input_csv = cfg['input_csv']\n assert(os.path.exists(input_csv))\n geodata_csv = input_csv[:-4] + \"_geodata.csv\"\n geodata_df = load_geodata(geodata_csv)\n\n population = cfg.get('population', None)\n county = cfg.get('county', None)\n assert(not ((population is None) and (county is None)))\n\n column = cfg.get('column', None)\n reporting_factor = cfg.get('reporting_factor', 1.0)\n assert type(reporting_factor) is float\n\n print(\"Input File: \"+input_csv+' with column '+str(column))\n if formulation == 'decay-lsq' or formulation == 'decay-blike':\n df, 
metadata = load_csv_data(input_csv)\n data = metadata['simulation parameters']\n ntrials = cfg.get('ntrials', df.columns)\n if verbose:\n print('parameters used to create data')\n print(data)\n\n if population is None:\n population = geodata_df['pop2010'][county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(county), str(population)))\n\n ndx = 0\n for t in df:\n if column is not None and t != column:\n continue\n \n ndx = ndx+1\n if not ntrials is None and ndx > ntrials:\n break\n\n # get the data - this will change with Bill's new stuff\n cm_rep_cases = df[t].to_list()\n\n sigma = cfg['sigma']\n gamma = cfg['gamma']\n deltaP = cfg['deltaP']\n\n if formulation == 'decay-lsq':\n results = frms.run_decay_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n deltaP=deltaP,\n reporting_factor=reporting_factor)\n else:\n # formulation == 'decay-blike'\n results = frms.run_decay_blike(cm_rep_cases=cm_rep_cases,\n population=population,\n sigma=sigma,\n gamma=gamma,\n deltaP=deltaP,\n reporting_factor=reporting_factor)\n\n results['est_R0'] = results['est_beta']/float(data['gamma'])\n results['true_beta'] = float(data['beta'])\n results['true_R0'] = float(data['beta'])/float(data['gamma'])\n all_results.append(results)\n\n elif formulation == 'delay-ln-lsq':\n raise NotImplementedError('Formulation ' + formulation + ' is not ready.')\n df, metadata = load_csv_data(input_csv)\n data = metadata['simulation parameters']\n if population is None:\n population = geodata_df['pop2010'][county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(county), str(population)))\n if verbose:\n print('parameters used to create data')\n print(data)\n for col in df:\n # get the data - this will change with Bill's new stuff\n cm_rep_cases = df[col].to_list()\n\n deltaE = cfg['deltaE']\n assert type(deltaE) is int and deltaE > 0\n deltaI = cfg['deltaI']\n assert type(deltaI) is int and deltaI > 0\n deltaP = cfg['deltaP']\n assert type(deltaP) is int and deltaP > 0\n\n results = frms.run_delay_ln_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n deltaE=deltaE,\n deltaI=deltaI,\n deltaP=deltaP,\n reporting_factor=reporting_factor,\n geodata_df=geodata_df)\n\n results['est_R0'] = results['est_beta']/float(data['gamma'])\n results['true_beta'] = float(data['beta'])\n results['true_R0'] = float(data['beta'])/float(data['gamma'])\n print(results)\n\n elif formulation == 'delay-lsq':\n #all_results = list()\n df, metadata = load_csv_data(input_csv)\n data = metadata['simulation parameters']\n if population is None:\n population = geodata_df['pop2010'][county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(county), str(population)))\n if verbose:\n print('parameters used to create data')\n print(data)\n for col in df:\n # get the data - this will change with Bill's new stuff\n cm_rep_cases = df[col].to_list()\n\n deltaE = cfg['deltaE']\n assert type(deltaE) is int and deltaE > 0\n deltaI = cfg['deltaI']\n assert type(deltaI) is int and deltaI > 0\n deltaP = cfg['deltaP']\n assert type(deltaP) is int and deltaP > 0\n\n results = frms.run_delay_lsq(cm_rep_cases=cm_rep_cases,\n population=population,\n deltaE=deltaE,\n deltaI=deltaI,\n deltaP=deltaP,\n reporting_factor=reporting_factor,\n geodata_df=geodata_df)\n\n results['est_R0'] = results['est_beta']/float(data['gamma'])\n results['true_beta'] = float(data['beta'])\n results['true_R0'] = float(data['beta'])/float(data['gamma'])\n\n else:\n raise ValueError('Invalid formulation', formulation, 'found 
in YAML file inference section.')\n\n all_df = pd.DataFrame(all_results)\n all_df.to_csv('inference_tests.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)\n pd.set_option('display.max_rows', 100000)\n print(all_df)\n\n #\n # Process batch_inference blocks\n #\n for cfg in config.get('batch_inference', []):\n verbose = cfg.get('verbose', args.verbose)\n factors = cfg.get('factors', None)\n config = cfg.get('config', None)\n output = cfg.get('output', None)\n\n assert factors is not None\n assert config is not None\n assert output is not None\n assert type(verbose) is bool\n \n config = process_config(config)\n if verbose:\n print('Inference Configuration:\\n', config)\n all_results = list()\n\n for CONFIG in factorial_iterator(factors, config):\n geodata_df = load_geodata(CONFIG.geodata_csv)\n if CONFIG.population is None:\n CONFIG.population = geodata_df['pop2010'][CONFIG.county]\n if verbose:\n print(\"County: %s Population: %s\" % (str(CONFIG.county), str(CONFIG.population)))\n\n check_config(CONFIG)\n print(\"Input File: \"+CONFIG.input_csv+' with column '+str(CONFIG.column))\n\n if CONFIG.formulation == 'decay-lsq' or CONFIG.formulation == 'decay-blike':\n all_results.extend( run_decay_lsq(CONFIG, verbose) )\n else:\n raise ValueError('Invalid formulation', CONFIG.formulation, 'found in YAML file inference section.')\n\n all_df = pd.DataFrame(all_results)\n #\n # Save results\n #\n print(\"Writing results in file \"+output)\n all_df.to_csv(output, index=False, quoting=csv.QUOTE_NONNUMERIC)\n #\n # Create a YAML file with metadata\n #\n metadata = {}\n metadata['timestamp'] = str(datetime.datetime.now())\n metadata['configuration'] = {}\n for key in cfg:\n metadata['configuration'][key] = cfg[key]\n metaoutput = output[:-4]+\"_meta.yml\" \n print(\"Writing results metadata in file \"+metaoutput)\n with open(metaoutput, 'w') as OUTPUT:\n yaml.dump(metadata, OUTPUT)\n #\n # Print data\n #\n pd.set_option('display.max_rows', 100000)\n print(all_df)\n\"\"\"\n\n\"\"\"\n df = pd.DataFrame(results)\n if args.resfile is not None:\n df.to_csv(args.resfile, index=False)\n\n print(df)\n print('Mean values:')\n print(df.mean())\n print(df.std())\n df.hist()\n #plt.title('foo')\n #plt.show()\n\"\"\"\n","sub_path":"epi_inference/ATTIC/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":37419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"566406045","text":"import sys\r\n\r\ndef suma(num1, num2):\r\n\treturn num1+num2\r\n\r\ndef resta(num1, num2):\r\n\treturn num1-num2\r\n\r\ndef multiplica(num1, num2):\r\n\treturn num1*num2\r\n\r\ndef divide(num1,num2):\t\t\r\n\t\r\n\ttry:\r\n\t\treturn num1/num2\r\n\texcept ZeroDivisionError:\r\n\r\n\t\tprint(\"no se puede dividir entre cero\")\r\n\t\treturn \"Operacion erronea\"\r\n\r\ncont = 0\r\nwhile True:\r\n\r\n\ttry:\r\n\t\top1=(int(input(\"Introduce el primer número: \")))\r\n\t\top2=(int(input(\"Introduce el segundo número: \")))\r\n\r\n\t\tbreak\r\n\t\t\r\n\r\n\texcept:\r\n\t\tprint(\"Los valores introducidos no son correctos\")\r\n\t\tcont = cont + 1\r\n\t\tif cont == 3:\r\n\t\t\tprint(\"Se han realizado demasiados intentos, ejecute el programa nuevamente\")\r\n\t\t\tsys.exit()\t\t\r\n\r\noperacion=input(\"Introduce la operación a realizar (suma,resta,multiplica,divide): \")\r\n\r\nif operacion==\"suma\":\r\n\tprint(suma(op1,op2))\r\n\r\nelif operacion==\"resta\":\r\n\tprint(resta(op1,op2))\r\n\r\nelif operacion==\"multiplica\":\r\n\tprint(multiplica(op1,op2))\r\n\r\nelif operacion==\"divide\":\r\n\tprint(divide(op1,op2))\r\n\r\nelse:\r\n\tprint (\"Operación no contemplada\")\r\n\r\n\r\nprint(\"Operación ejecutada. Continuación de ejecución del programa \")","sub_path":"Python/AP-Python/Curso Tutorizado Python/Python-pildoras/Trabajo con excepciones.py","file_name":"Trabajo con excepciones.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"159977354","text":"import json\r\nimport requests\r\nfrom requests.exceptions import RequestException\r\nfrom bs4 import BeautifulSoup\r\nfrom multiprocessing import Pool\r\n\r\n\r\ndef get_one_page(url):\r\n try:\r\n response = requests.get(url)\r\n if response.status_code == 200:\r\n return response.text\r\n except RequestException:\r\n return None\r\n\r\ndef prase_one_apge(html):\r\n soup = BeautifulSoup(html,'lxml')\r\n items=soup.select('dl > dd')\r\n for item in items:\r\n yield {\r\n 'title': item.find('a').get(\"title\"),\r\n 'img' : item.find('img',{'class':\"board-img\"}).get(\"data-src\"),\r\n 'star': item.find('p',{'class':'star'}).get_text().strip()[3:],\r\n 'time':item.find('p',{'class':'releasetime'}).get_text()[5:],\r\n 'score':item.find('i',{'class':'integer'}).get_text()+item.find('i',{'class':'fraction'}).get_text()\r\n }\r\n\r\ndef write_to_txt(content):\r\n with open('result.txt','a',encoding='utf8') as f:\r\n f.write(json.dumps(content,ensure_ascii=False)+'\\n')\r\n f.close()\r\n\r\ndef main(offset):\r\n url = 'http://maoyan.com/board/4?offset='+str(offset)\r\n html = get_one_page(url)\r\n for content in prase_one_apge(html):\r\n print(content)\r\n write_to_txt(content)\r\n\r\nif __name__ == '__main__':\r\n for i in range(10):\r\n main(10*i)","sub_path":"Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"581012356","text":"import os\r\nclass Parameters():\r\n def __init__(self):\r\n project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\r\n self.script_mode = \"eval\" # \"train\" / \"eval\"\r\n self.path_for_eval =\"output_example\\train_50_test_1000_output_example\"\r\n\r\n self.run_name=os.path.join(project_path,\"train20_test_100_threshold5_Atoms200_Anchor2_Alpha2_Iters2000_Epoch1000_Mode1234_lr0.01\")\r\n\r\n # self.run_name=\"conv_train20_test_100_Epoch1000_lr0.01_8_ch\"\r\n\r\n self.base_folder = os.path.join(project_path,\"data_out\")\r\n\r\n self.images_path =os.path.join(project_path,\"data\\t10k-images.idx3-ubyte\")\r\n self.labels_path = os.path.join(project_path,\"data\\t10k-labels.idx1-ubyte\")\r\n\r\n self.ANCHOR_POINTS_STEP= 2\r\n self.IMAGES_TRAIN_PER_CLASS = 20\r\n self.IMAGES_TEST_PER_CLASS = 100\r\n\r\n self.NUM_OF_ATOMS_DICT_1 = 500 # 80\r\n self.NUM_OF_ATOMS_DICT_2 = 250\r\n self.NUM_OF_ATOMS_DICT_3 = 125\r\n self.NUM_OF_ATOMS_DICT_4 = 60\r\n\r\n self.use_another_fc=0\r\n self.dnn_num_ch=8\r\n\r\n # Algorithm used to transform the data\r\n self.transform_algorithm = 'threshold' # 'omp' / 'lars'/ 'threshold'/ lasso_cd\r\n # lars: uses the least angle regression method\r\n # lasso_lars: uses Lars to compute the Lasso solution\r\n # lasso_cd: uses the coordinate descent method to compute the Lasso solution\r\n # omp : uses orthogonal matching pursuit to estimate the sparse solution\r\n # threshold: squashes to zero all coefficients less than alpha from the projection\r\n\r\n\r\n\r\n self.kwargs = {'transform_alpha': 5.0} # None/ {'transform_n_nonzero_coefs': 5} / for threshold: {'transform_alpha': .1}\r\n self.ALPHA = 2 # sparsity control. High num = more sparse\r\n self.n_iters = 2 # 50 iters for learning the dict\r\n\r\n # DNN\r\n self.mode = \"1234\" # \"img\", \"1\", \"2\", \"3\", \"4\", \"12\", \"123\", \"1234\"\r\n self.lr = 0.01 # 0.01\r\n self.epochs = 1000\r\n self.batch = 1","sub_path":"data_out/threshold20_Atoms200_Anchor2_Alpha1/files_used_for_run/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"620805441","text":"\r\n\"\"\"\r\nScript to plot histograms of beta-beating before and after\r\nBPM calibration.\r\n\r\nAuthor: Jacqueline Keintzel\r\nDate: 24/10/2019\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nfrom optparse import OptionParser\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport numpy as np\r\nfrom func import read_bet_phase, read_bet_amp\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n fix = 10\r\n fiy = 4.5\r\n size = 20\r\n\r\n parser = OptionParser()\r\n parser.add_option(\"-s\", \"--sdds\", dest=\"sdds\", help=\"Folder of sdds files.\", action=\"store\")\r\n parser.add_option(\"-o\", \"--phase\", dest=\"phase\", help=\"Folder of phase output files.\", action=\"store\")\r\n parser.add_option(\"-a\", \"--axis\", dest=\"axis\", help=\"Transverse plane, either x or y.\", action=\"store\")\r\n parser.add_option(\"-w\", \"--when\", dest=\"when\", help=\"Before or after calibration.\", action=\"store\")\r\n parser.add_option(\"-p\", \"--pngpdf\", dest=\"pngpdf\", help=\"Format of plot.\", action=\"store\")\r\n (options, args) = parser.parse_args()\r\n\r\n\r\n all_sdds = [sd for sd in os.listdir(options.sdds) if 'sdds' in sd]\r\n\r\n beat = []\r\n for sdds in all_sdds:\r\n folder = os.path.join(options.phase, sdds)\r\n\r\n beta_phase, beta_phase_err, bpms = read_bet_phase(folder, options.axis)\r\n beta_amp, beta_amp_err = read_bet_amp(folder, options.axis)\r\n\r\n beat = beat + [100*(beta_amp[i] - beta_phase[i])/beta_phase[i] for i in range(len(beta_phase)) if abs(100*(beta_amp[i] - beta_phase[i])/beta_phase[i]) <= 250 ] \r\n\r\n av = np.mean(beat)\r\n\r\n plt.figure(figsize=(fix, fiy))\r\n n, bins, patches = plt.hist(beat, bins=200)\r\n plt.plot(np.array([av,av]), np.array([0,max(n)]), ls='--', color = 'grey', lw = 2)\r\n plt.ylim(0, max(n))\r\n plt.tick_params('both', labelsize=size)\r\n if options.axis == 'x': plt.xlabel(r'$(\\beta_{x, amp}-\\beta_{x, ph}) / \\beta_{x, ph}$ [%]', fontsize=size)\r\n else: plt.xlabel(r'$(\\beta_{y, amp}-\\beta_{y, ph}) / \\beta_{y, ph}$ [%]', fontsize=size)\r\n plt.ylabel('Counts', fontsize=size)\r\n plt.tight_layout()\r\n plt.savefig(options.sdds+'../BPMcalib_'+options.when+'_'+options.axis+'.'+options.pngpdf)\r\n plt.close()\r\n","sub_path":"checkCALIBRATION_histogram.py","file_name":"checkCALIBRATION_histogram.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"589944247","text":"##############################################################################\n# Copyright (c) 2017 taseer94@gmail.com and others.\n#\n# All rights reserved. This program and the accompanying materials\n# are made available under the terms of the Apache License, Version 2.0\n# which accompanies this distribution, and is available at\n# http://www.apache.org/licenses/LICENSE-2.0\n##############################################################################\n\n\nimport click\nimport os\n\nfrom qtip.cli import utils\n\n\n@click.group()\ndef cli():\n \"\"\" Manage QTIP workspace \"\"\"\n pass\n\n\n@cli.command(\"create\", help=\"Create QTIP workspace\")\n@click.option('--pod', default='unknown', help='Name of pod under test')\n@click.option('--installer', help='OPNFV installer', required=True)\n@click.option('--master-host', help='Installer hostname', required=True)\n@click.option('--scenario', default='unknown', help='OPNFV scenario')\n@click.argument('name')\ndef create(pod, installer, master_host, scenario, name):\n extra_vars = {\n 'qtip_package': utils.QTIP_PACKAGE,\n 'cwd': os.getcwd(),\n 'pod_name': pod,\n 'installer': installer,\n 'scenario': scenario,\n 'installer_master_host': master_host,\n 'workspace': name\n }\n os.system(\"ANSIBLE_ROLES_PATH={qtip_package}/{roles_path} ansible-playbook\"\n \" -i {qtip_package}/{roles_path}/qtip-workspace/hosts\"\n \" {qtip_package}/{roles_path}/qtip-workspace/create.yml\"\n \" --extra-vars '{extra_vars}'\"\n \"\".format(qtip_package=utils.QTIP_PACKAGE,\n roles_path=utils.ROLES_PATH,\n extra_vars=utils.join_vars(**extra_vars)))\n","sub_path":"qtip/cli/commands/cmd_workspace.py","file_name":"cmd_workspace.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"581634950","text":"import json\nimport uuid\n\nfrom jupyterhub.handlers import default_handlers\nfrom tornado.httpclient import HTTPRequest\n\nfrom ..misc import get_custom_config\nfrom .misc import RequestAPIHandler\n\n\nclass ForwardTunnelRestartAPIHandler(RequestAPIHandler):\n def check_xsrf_cookie(self):\n pass\n\n \"\"\"APIHandler to forward restart request to tunnel webservice\"\"\"\n\n async def post(self):\n self.set_header(\"Cache-Control\", \"no-cache\")\n if not self.request.headers.get(\"Authorization\", None):\n self.set_status(403)\n return\n\n uuidcode = self.request.headers.get(\"uuidcode\", uuid.uuid4().hex)\n body = self.request.body.decode(\"utf8\")\n body_dict = json.loads(body) if body else {}\n log_extras = {\n \"uuidcode\": uuidcode,\n \"action\": \"restarttunnel\",\n \"body\": body_dict,\n }\n self.log.info(\"Forward request to restart ssh-tunnels\", extra=log_extras)\n custom_config = get_custom_config()\n req_prop = self.get_req_prop(custom_config, \"tunnel\", uuidcode)\n req_prop[\"headers\"][\"Authorization\"] = self.request.headers[\"Authorization\"]\n tunnel_url = req_prop.get(\"urls\", {}).get(\"restart\", \"None\")\n req = HTTPRequest(\n tunnel_url,\n method=\"POST\",\n headers=req_prop[\"headers\"],\n body=self.request.body,\n **req_prop.get(\"request_kwargs\", {})\n )\n try:\n await self.send_request(req, \"restarttunnel\", uuidcode)\n except Exception as e:\n self.set_status(500)\n self.write(str(e))\n return\n\n self.set_status(200)\n return\n\n\ndefault_handlers.append((r\"/api/restarttunnel\", ForwardTunnelRestartAPIHandler))\n","sub_path":"custom/4.0.0/apihandler/tunnel_restart.py","file_name":"tunnel_restart.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"452490990","text":"\"\"\"\nPrototype selection\n\nGiven a set of n elements (S) and their pairwise distances in terms of a\nskbio distance matrix (DM) and a given number (k << n), find the sub-set (s)\nconsisting of exactly k elements, such that s best represents the full set S.\n\nHere, we define \"best represents\" as maximizing the sum of distances between\nall points, i.e. sum {over a,b in S} DM[a,b]. This is our objective function.\n\nThis problem is known to be NP-hard [1], thus we need to resort to heuristics.\nIn the future, this module will implement several heuristics, whose quality can\nbe measured by the objective function for each problem instance, since there is\nno global winner.\nFor completeness, the exact but exponential algorithm is implemented, too.\n \"prototype_selection_exhaustive\"\n\n[1] Gamez, J. Esteban, François Modave, and Olga Kosheleva.\n \"Selecting the most representative sample is NP-hard:\n Need for expert (fuzzy) knowledge.\"\n Fuzzy Systems, 2008. FUZZ-IEEE 2008.\n\"\"\"\n\n# needed for signature type annotations, but only works for python >= 3.5\n# from typing import Sequence, Tuple\nfrom itertools import combinations\n\nimport numpy as np\nimport scipy as sp\nfrom skbio.stats.distance import DistanceMatrix\n\n\ndef distance_sum(elements, dm):\n '''Compute the sum of pairwise distances for the given elements according to\n the given distance matrix.\n\n Parameters\n ----------\n elements: sequence of str\n list or elements for which the sum of distances is computed\n dm: skbio.stats.distance.DistanceMatrix\n pairwise distance matrix.\n\n Returns\n -------\n float:\n the sum of all pairwise distances of dm for IDs in elements\n\n Notes\n -----\n function signature with type annotation for future use with python >= 3.5\n def distance_sum(elements: Sequence[str], dm: DistanceMatrix) -> float:\n '''\n\n return np.tril(dm.filter(elements).data).sum()\n\n\ndef prototype_selection_exhaustive(dm, num_prototypes,\n max_combinations_to_test=200000):\n '''Select k prototypes for given distance matrix\n\n Parameters\n ----------\n dm: skbio.stats.distance.DistanceMatrix\n pairwise distances for all elements in the full set S.\n Must be symmetric and non-hollow.\n num_prototypes: int\n Number of prototypes to select for distance matrix.\n Must be >= 2, since a single prototype is useless.\n Must be smaller than the number of elements in the distance matrix,\n otherwise no reduction is necessary.\n max_combinations_to_test: int\n The maximal number of combinations to test. If exceeding, the function\n declines execution.\n\n Returns\n -------\n sequence of str\n A sequence holding selected prototypes, i.e. a sub-set of the\n elements in the distance matrix.\n\n Raises\n ------\n RuntimeError\n Combinatorics explode even for small instances. To save the user from\n waiting (almost) forever, this function declines execution if the\n number of combinations to test are too high,\n i.e. > max_combinations_to_test\n ValueError\n The number of prototypes to be found should be at least 2 and at most\n one element smaller than elements in the distance matrix. Otherwise, a\n ValueError is raised.\n\n Notes\n -----\n This is the reference implementation for an exact algorithm for the\n prototype selection problem. It has an exponential runtime and will only\n operate on small instances (< max_combinations_to_test).\n Idea: test all (n over k) combinations of selecting k elements from n with-\n out replacement. 
Compute the objective for each such combination and\n report the combination with maximal value.\n\n function signature with type annotation for future use with python >= 3.5:\n def prototype_selection_exhaustive(dm: DistanceMatrix, num_prototypes: int,\n max_combinations_to_test: int=200000) -> Sequence[str]:\n '''\n if num_prototypes < 2:\n raise ValueError((\"'num_prototypes' must be >= 2, since a single \"\n \"prototype is useless.\"))\n if num_prototypes >= len(dm.ids):\n raise ValueError((\"'num_prototypes' must be smaller than the number of\"\n \" elements in the distance matrix, otherwise no \"\n \"reduction is necessary.\"))\n\n num_combinations = sp.special.binom(len(dm.ids), num_prototypes)\n if num_combinations >= max_combinations_to_test:\n raise RuntimeError((\"Cowardly refuse to test %i combinations. Use a \"\n \"heuristic implementation for instances with more \"\n \"than %i combinations instead!\")\n % (num_combinations, max_combinations_to_test))\n\n max_dist, max_set = -1 * np.infty, None\n for s in set(combinations(dm.ids, num_prototypes)):\n d = distance_sum(s, dm)\n if d > max_dist:\n max_dist, max_set = d, s\n return max_set\n","sub_path":"phylogeny/prototypeSelection.py","file_name":"prototypeSelection.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"574133353","text":"import os\nfrom scipy.stats import gaussian_kde\nfrom numpy.random import normal\nfrom numpy import arange\nimport numpy as np\nfrom gusPyCode.defs import packings\nimport matplotlib as mpl\n\n\n\ndef autoPickColors(number):\n \"\"\"Generate a set of colors with near optimal contrasts.\"\"\"\n packingsPath = os.path.dirname(os.path.realpath(packings.__file__))\n \n coords = map(lambda l: l.strip('\\n'), open(\"%s/pack.3.%s.txt\" % (packingsPath,number), 'rU'))\n coords = np.array(coords, dtype=np.float)\n coords = coords.reshape(number,3)\n coords = coords * 0.5\n coords = coords + coords.max()\n \n colors = []\n for c in coords:\n colors.append(mpl.colors.rgb2hex(np.absolute(c)))\n \n return tuple(colors)\n \ndef setTickSizes(axObj,fontSize):\n for label in axObj.xaxis.get_ticklabels():\n # label is a Text instance\n #label.set_color('red')\n #label.set_rotation(45)\n label.set_fontsize(fontSize)\n \n for label in axObj.yaxis.get_ticklabels():\n # label is a Text instance\n #label.set_color('red')\n #label.set_rotation(45)\n label.set_fontsize(fontSize)\n\ndef violin_plot(ax,data,pos, bp=False):\n '''(http://pyinsci.blogspot.com/2009/09/violin-plot-with-matplotlib.html)\n create violin plots on an axis\n '''\n dist = max(pos)-min(pos)\n w = min(0.15*max(dist,1.0),0.5)\n for d,p in zip(data,pos):\n if d == 0: # WAD: handles cases of no data\n continue\n k = gaussian_kde(d) #calculates the kernel density\n m = k.dataset.min() #lower bound of violin\n M = k.dataset.max() #upper bound of violin\n x = arange(m,M,(M-m)/100.) # support for violin\n v = k.evaluate(x) #violin profile (density curve)\n v = v/v.max()*w #scaling the violin to the available space\n ax.fill_betweenx(x,p,v+p,facecolor='y',alpha=0.3)\n ax.fill_betweenx(x,p,-v+p,facecolor='y',alpha=0.3)\n if bp:\n ax.boxplot(data,notch=1,positions=pos,vert=1)","sub_path":"gusPyCode/defs/mpl_custom.py","file_name":"mpl_custom.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"13482682","text":"import unittest\n\nfrom pycose.attributes import CoseAttrs\n\n\nclass CoseAttrsTest(unittest.TestCase):\n test2_params = \\\n {\n \"test1\": ['alg', \"HS256/64\", {1: 4}],\n \"test2\": ['alg', \"HS256\", {1: 5}],\n \"test3\": ['alg', \"EdDSA\", {1: -8}],\n \"test4\": ['alg', \"AES-MAC128/64\", {1: 14}],\n \"test5\": ['alg', \"AES-MAC128/128\", {1: 25}],\n \"test6\": ['alg', \"A256GCM\", {1: 3}]\n }\n\n def test1_reinstantiation(self):\n new_coseattr = CoseAttrs()\n new_coseattr['alg'] = \"HS256/64\"\n new_coseattr = CoseAttrs()\n # test for empty dictionary\n self.assertEqual(bool(new_coseattr), False)\n\n def test2_creation(self):\n for name_test, (a, b, c) in self.test2_params.items():\n with self.subTest(name=name_test):\n new_coseattr = CoseAttrs()\n new_coseattr[a] = b\n self.assertEqual(new_coseattr, c)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_coseattrs.py","file_name":"test_coseattrs.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"385438741","text":"import FWCore.ParameterSet.Config as cms\nimport os\n\nprocess = cms.Process(\"EcalAlignment\")\n\n# initialize MessageLogger and output report\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True))\n\n########################################\n#### Define new alignment constants ####\n\nprocess.load(\"Configuration.StandardSequences.GeometryDB_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.GlobalTag.globaltag = cms.string('GR_R_38X_V13::All')\n\nprocess.GlobalTag.toGet = cms.VPSet(\n cms.PSet(\n record = cms.string(\"EBAlignmentRcd\"),\n tag = cms.string(\"EBAlignment_measured_v04_offline\"),\n connect = cms.untracked.string(\"sqlite_file:EBAlign_2010_NoAlign.db\")\n ),\n cms.PSet(\n record = cms.string(\"EEAlignmentRcd\"),\n tag = cms.string(\"EEAlignment_measured_v04_offline\"),\n connect = cms.untracked.string(\"sqlite_file:EEAlign_2010_NoAlign.db\")\n )\n)\n\n########################################\n########################################\n\nfrom TrackingTools.Configuration.TrackingTools_cff import *\n\nprocess.load(\"Configuration.StandardSequences.Services_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_38T_cff\")\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.load(\"Configuration.StandardSequences.RawToDigi_Data_cff\")\nprocess.load(\"Configuration.StandardSequences.Reconstruction_cff\")\n\nprocess.load(\"Configuration.EventContent.EventContent_cff\")\n\nfrom RecoEgamma.Configuration.RecoEgamma_cff import *\n\n\nprocess.source = cms.Source(\n \"PoolSource\",\n fileNames = cms.untracked.vstring(\n 'file:/tmp/amassiro/669A4128-43D0-DF11-AE93-001A92810ACE.root'\n #'file:/gwterax1/users/amassiro/CMSSWroot/Run2010B_Electron_RAW_RECO/669A4128-43D0-DF11-AE93-001A92810ACE.root'\n )\n )\n\nprocess.maxEvents = cms.untracked.PSet(\n# input = cms.untracked.int32(100)\n input = cms.untracked.int32(-1)\n)\n\n\nprocess.mylocalreco = cms.Sequence(process.trackerlocalreco*process.calolocalreco)\nprocess.myglobalreco = cms.Sequence(process.offlineBeamSpot+process.recopixelvertexing*process.ckftracks+process.ecalClusters+process.caloTowersRec*process.vertexreco*egammarecoGlobal*process.particleFlowCluster)\nprocess.myelectronseeding = cms.Sequence(process.trackerDrivenElectronSeeds*process.ecalDrivenElectronSeeds*process.electronMergedSeeds)\nprocess.myelectrontracking = cms.Sequence(process.electronCkfTrackCandidates*process.electronGsfTracks)\n\nprocess.pReRECO = cms.Sequence(process.RawToDigi*process.mylocalreco*process.myglobalreco*process.myelectronseeding*process.myelectrontracking*process.particleFlowReco*process.pfElectronTranslator*process.gsfElectronSequence)\n\n\n#--------------------------\n#Define PAT sequence\n#--------------------------\n\n# Standard PAT Configuration File\nprocess.load(\"PhysicsTools.PatAlgos.patSequences_cff\")\n\nfrom PhysicsTools.PatAlgos.tools.coreTools import *\n## remove MC matching from the default sequence\nremoveMCMatching(process, ['All'])\n\n\n# add cIc electron ID\nprocess.load(\"EcalValidation.EcalAlignment.CiC_eIDSequence_cff\")\n\nprocess.patElectronIDs = cms.Sequence(process.CiC_eIDSequence)\nprocess.makePatElectrons = cms.Sequence(process.patElectronIDs*process.patElectronIsolation*process.patElectrons)\n\nprocess.patElectrons.electronSource = 
cms.InputTag(\"gsfElectrons::EcalAlignment\")\n\nprocess.patElectrons.addElectronID = cms.bool(True)\nprocess.patElectrons.electronIDSources = cms.PSet(\n eidVeryLoose = cms.InputTag(\"eidVeryLoose\"),\n eidLoose = cms.InputTag(\"eidLoose\"),\n eidMedium = cms.InputTag(\"eidMedium\"),\n eidTight = cms.InputTag(\"eidTight\"),\n eidSuperTight = cms.InputTag(\"eidSuperTight\")\n )\n##\n#process.patElectrons.addGenMatch = cms.bool(False)\n#process.patElectrons.embedGenMatch = cms.bool(False)\n\n\n# Add tcMET and pfMET\nfrom PhysicsTools.PatAlgos.tools.metTools import *\naddTcMET(process, 'TC')\naddPfMET(process, 'PF')\n\n\n# get the jet corrections\n##from PhysicsTools.PatAlgos.tools.jetTools import *\n##switchJECSet( process, \"Summer09_7TeV_ReReco332\")\n\nfrom PhysicsTools.PatAlgos.tools.cmsswVersionTools import *\n## uncomment this line to run on an 35X input sample\n#run36xOn35xInput(process)\n\n\n\n#--------------------------\n# AllPassFilter\n#--------------------------\n\nprocess.AllEvents = cms.EDProducer(\"EventCountProducer\")\nprocess.FilterL1FilterEvents = cms.EDProducer(\"EventCountProducer\")\nprocess.FilterGoodVertexFilterEvents = cms.EDProducer(\"EventCountProducer\")\nprocess.FilterNoScrapingFilterEvents = cms.EDProducer(\"EventCountProducer\")\nprocess.FilterElectronFilterEvents = cms.EDProducer(\"EventCountProducer\")\nprocess.FilterReRECOEvents = cms.EDProducer(\"EventCountProducer\")\nprocess.FilterPatDefaultSequenceEvents = cms.EDProducer(\"EventCountProducer\")\n\n\n#--------------------------\n# Ntple\n#--------------------------\n\nprocess.ntupleEcalAlignment = cms.EDAnalyzer(\n 'EcalAlignment',\n recHitCollection_EB = cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEB\"),\n recHitCollection_EE = cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEE\"),\n EleTag = cms.InputTag(\"patElectrons\"),\n TrackTag = cms.InputTag(\"generalTracks\"),\n CALOMetTag = cms.InputTag(\"patMETs\"),\n eleId_names = cms.untracked.vstring('eidLoose','eidMedium','eidSuperTight','eidTight','eidVeryLoose'),\n )\n\n\n\nprocess.TFileService = cms.Service(\n \"TFileService\",\n fileName = cms.string(\"EcalAlignment.root\")\n )\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = cms.untracked.vstring(\n 'keep *',\n# 'drop *',\n# 'keep *_gsfElectrons_*_*'\n# 'keep *_*_*_EcalAlignmentBIS'\n# 'drop *_gsfElectron_*_*',\n# 'drop *_gsfElectronCores_*_*'\n ),\n fileName = cms.untracked.string(\"outCMSSW.root\")\n)\n\n\n#--------------------------\n# filters\n#--------------------------\n\n# filter on PhysDeclared bit\nprocess.skimming = cms.EDFilter(\n \"PhysDecl\",\n applyfilter = cms.untracked.bool(True),\n debugOn = cms.untracked.bool(False),\n HLTriggerResults = cms.InputTag(\"TriggerResults\",\"\",\"HLT\")\n )\n\n# filter on bit = and (40 || 41) and !(bit36 || bit37 || bit38 || bit39)\nprocess.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')\nprocess.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')\nprocess.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)\nprocess.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('0 AND (40 OR 41) AND NOT (36 OR 37 OR 38 OR 39)')\n\n# filter on primary vertex\nprocess.primaryVertexFilter = cms.EDFilter(\n \"GoodVertexFilter\",\n vertexCollection = cms.InputTag('offlinePrimaryVertices'),\n minimumNDOF = cms.uint32(4) ,\n maxAbsZ = cms.double(24),\n maxd0 = cms.double(2)\n )\n\n# FilterOutScraping\nprocess.noscraping = cms.EDFilter(\n \"FilterOutScraping\",\n applyfilter = cms.untracked.bool(True),\n debugOn = 
cms.untracked.bool(False),\n numtrack = cms.untracked.uint32(10),\n thresh = cms.untracked.double(0.25)\n )\n\n# select events with at least one gsf electron\nprocess.highetele = cms.EDFilter(\n \"GsfElectronSelector\",\n src = cms.InputTag(\"gsfElectrons\"),\n cut = cms.string(\"superCluster().get().energy()*sin(theta())> 0 \")\n )\n\nprocess.highetFilter = cms.EDFilter(\n \"CandViewCountFilter\",\n src = cms.InputTag(\"highetele\"),\n minNumber = cms.uint32(1)\n )\n\n\n\n#--------------------------\n# paths\n#--------------------------\n\nprocess.p = cms.Path(\n process.AllEvents # |-> counter\n *process.skimming\n *process.FilterL1FilterEvents # |-> counter\n *process.hltLevel1GTSeed\n *process.FilterGoodVertexFilterEvents # |-> counter \n *process.primaryVertexFilter\n *process.FilterNoScrapingFilterEvents # |-> counter \n *process.noscraping\n *process.FilterElectronFilterEvents # |-> counter \n *process.pReRECO\n *process.highetele\n *process.highetFilter\n *process.FilterReRECOEvents # |-> counter \n *process.patDefaultSequence\n *process.FilterPatDefaultSequenceEvents # |-> counter\n *process.ntupleEcalAlignment\n )\n\n#process.outpath = cms.EndPath(process.out)\n","sub_path":"test/old8TeV/Validation_No_Align_ElectronRERECO_cfg.py","file_name":"Validation_No_Align_ElectronRERECO_cfg.py","file_ext":"py","file_size_in_byte":8374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"91197678","text":"\"\"\"\n-------------------------------------------------------\nMain\n-------------------------------------------------------\nAuthor: Edmund Lui\nID: 160635540\nEmail: luix5540@mylaurier.ca\nSection: CP468\n-------------------------------------------------------\n\"\"\"\nfrom aStarAlgorithm import aStar\n\n\ndef print_grid(grid, path):\n for j in path:\n q = j[0]\n p = j[1]\n grid[p][q] = \"*\"\n for i in grid:\n print()\n for j in i:\n print (j, end=\"\")\n for j in path:\n q = j[0]\n p = j[1]\n grid[p][q] = 0\n print()\n print()\n\n\ndef main():\n # opening input file\n fv = open(\"input4.txt\", \"r\")\n \n # defining variables to make the grid\n cols = 0\n rows = 0\n num_of_robots = 0\n robot_locations = []\n grid = []\n end = (0, 0)\n \n # going through file and extracting data\n x = 0\n for i in fv:\n z = i[:-1]\n # cols and rows\n if x == 0:\n values = z.split(\" \")\n rows = int(values[0])\n cols = int(values[1])\n # number of robots\n elif x == 1:\n num_of_robots = int(z) + x\n # locations of robot\n elif x <= num_of_robots:\n values = z.split(\" \")\n a = int(values[0])\n b = int(values[1])\n robot_locations.append((a, b))\n elif x == num_of_robots + 1:\n values = z.split(\" \")\n a = int(values[0])\n b = int(values[1])\n end = (a, b)\n else:\n temp_grid = []\n for j in z:\n temp_grid.append(int(j))\n grid.append(temp_grid)\n x += 1\n \n for i in robot_locations:\n path = aStar(grid, i, end)\n print(path)\n print_grid(grid, path)\n \n\nmain()\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"614310797","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport smtplib\n\n# funcao para enviar email\ndef envia_msg(email):\n\n # credenciais\n remetente = 'zimbra@inmetro.rs.gov.br'\n senha = '@ZimbraAccount@123'\n\n # informacoes da mensagem\n destinatario = ['msilva@inmetro.rs.gov.br', 'btougeiro@inmetro.rs.gov.br']\n assunto = 'Conta de e-mail bloqueada.'\n texto = 'A conta de e-mail: %s foi bloqueada por tentativa de spam!' % email\n\n # preparando a mensagem\n mensagem = '\\r\\n'.join([\n 'From: %s' % remetente,\n 'To: %s' % ', '.join(destinatario),\n 'Subject: %s' % assunto,\n '',\n '%s' % texto\n ])\n\n # enviando email\n smtp = smtplib.SMTP('intranet.inmetro.rs.gov.br', 587)\n smtp.starttls()\n smtp.login(remetente, senha)\n smtp.sendmail(remetente, destinatario, mensagem)\n smtp.quit()\n\ndef notificacao(email, items):\n\n # credenciais\n remetente = 'zimbra@inmetro.rs.gov.br'\n senha = '@ZimbraAccount@123'\n\n # informacoes da mensagem\n destinatario = ['msilva@inmetro.rs.gov.br', 'btougeiro@inmetro.rs.gov.br']\n assunto = 'Alerta de e-mails na fila.'\n texto = 'A conta de e-mail: %s esta com %i e-mails na fila!' % (email, items)\n\n # preparando a mensagem\n mensagem = '\\r\\n'.join([\n 'From: %s' % remetente,\n 'To: %s' % ', '.join(destinatario),\n 'Subject: %s' % assunto,\n '',\n '%s' % texto\n ])\n\n # enviando email\n smtp = smtplib.SMTP('intranet.inmetro.rs.gov.br', 587)\n smtp.starttls()\n smtp.login(remetente, senha)\n smtp.sendmail(remetente, destinatario, mensagem)\n smtp.quit()\n\n# cria array de e-mails na fila\nsaida = os.popen(\"mailq | grep ^[0-9A-F] | awk '{print $7}' | sort | uniq -c\").read()\nsaida = saida.split()\n\n# criando dicionario para receber lista\nsaida_dict = dict()\n\n# transforma lista em dicionario\nfor x in range(len(saida)):\n if len(saida) == 0:\n break\n a = saida.pop()\n b = saida.pop()\n saida_dict[a] = int(b)\n\nex = [\"sgi@inmetro.rs.gov.br\", \"sgi@rbmlq.gov.br\", \"todos@inmetro.rs.gov.br\"]\n# identificar spam\nfor x in saida_dict.items():\n # bloqueia e-mail spam\n if x[0] in ex and x[1] >= 50:\n notificacao(x[0], x[1])\n os.system('echo \"$(date) - info - A conta de e-mail: %s esta com %i e-mails na fila.\" >> /agendamentos/spam.log' % (x[0],x[1]))\n if not x[0] in ex and x[1] >= 50:\n os.system('zmprov ma %s zimbraAccountStatus locked' % x[0])\n envia_msg(x[0])\n os.system('echo \"$(date) - warning - A conta de e-mail: %s foi bloqueada por tentativa de spam.\" >> /agendamentos/spam.log' % x[0])\n\nexit()","sub_path":"various/ban_spam.py","file_name":"ban_spam.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"498880766","text":"import wave\r\n\r\nimport pyaudio\r\nimport pyrebase\r\nfrom PyQt5 import QtWidgets\r\n\r\nfrom playsound import playsound\r\n\r\nfrom ui_recorder import Ui_Recorder\r\n\r\nclass RecorderWindow(QtWidgets.QMainWindow, Ui_Recorder):\r\n def __init__(self, parent=None):\r\n super(RecorderWindow, self).__init__(parent)\r\n self.setupUi(self)\r\n self.show()\r\n\r\n self.pushButton.clicked.connect(self.record_message)\r\n self.pushButton_3.clicked.connect(self.play_message)\r\n self.pushButton_4.clicked.connect(self.upload_message)\r\n\r\n def record_message(self):\r\n FORMAT = pyaudio.paInt16\r\n CHANNELS = 2\r\n RATE = 44100\r\n CHUNK = 1024\r\n RECORD_SECONDS = 20\r\n val = 0\r\n\r\n audio = pyaudio.PyAudio()\r\n\r\n stream = audio.open(format=FORMAT, channels=CHANNELS,\r\n rate=RATE, input=True,\r\n frames_per_buffer=CHUNK)\r\n frames = []\r\n\r\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\r\n data = stream.read(CHUNK)\r\n frames.append(data)\r\n val += (0.006 * RECORD_SECONDS)\r\n self.recordBar.setValue(val)\r\n\r\n stream.stop_stream()\r\n stream.close()\r\n audio.terminate()\r\n\r\n WAVE_OUTPUT_FILENAME = \"message.wav\"\r\n waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\r\n waveFile.setnchannels(CHANNELS)\r\n waveFile.setsampwidth(audio.get_sample_size(FORMAT))\r\n waveFile.setframerate(RATE)\r\n waveFile.writeframes(b''.join(frames))\r\n waveFile.close()\r\n\r\n def play_message(self):\r\n playsound(\"message.wav\")\r\n self.playBar.setValue(100)\r\n\r\n def upload_message(self):\r\n self.uploadBar.setValue(0)\r\n config = {\r\n \"apiKey\": \"AIzaSyBuQKF1M7jZp1NqSLuhZGiTVyFOMX4zUxE\",\r\n \"authDomain\": \"mars-8c17e.firebaseapp.com\",\r\n \"databaseURL\": \"https://mars-8c17e.firebaseio.com\",\r\n \"storageBucket\": \"mars-8c17e.appspot.com\",\r\n }\r\n\r\n firebase = pyrebase.initialize_app(config)\r\n\r\n storage = firebase.storage()\r\n database = firebase.database()\r\n data = {\"Message\": \"sent\"}\r\n\r\n storage.child(\"message.wav\").put(\"message.wav\")\r\n database.child().push(data)\r\n self.uploadBar.setValue(100)","sub_path":"Interface/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"152235102","text":"import os\nimport subprocess\n\n__version__ = '0.4.0'\n\ncurrent_path = os.getcwd()\ntry:\n os.chdir(os.path.dirname(__file__))\n GIT_VERSION = subprocess.check_output([\"git\", \"describe\", \"--always\"]).strip().decode('utf-8')\nexcept (subprocess.CalledProcessError, FileNotFoundError) as e:\n GIT_VERSION = \"Unknown\"\nos.chdir(current_path)","sub_path":"dataman/__version__.py","file_name":"__version__.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"584468596","text":"import numpy as np\nfrom random import shuffle\nfrom past.builtins import xrange\n\ndef svm_loss_naive(W, X, y, reg):\n \"\"\"\n Structured SVM loss function, naive implementation (with loops).\n\n Inputs have dimension D, there are C classes, and we operate on minibatches\n of N examples.\n\n Inputs:\n - W: A numpy array of shape (D, C) containing weights.\n - X: A numpy array of shape (N, D) containing a minibatch of data.\n - y: A numpy array of shape (N,) containing training labels; y[i] = c means\n that X[i] has label c, where 0 <= c < C.\n - reg: (float) regularization strength\n\n Returns a tuple of:\n - loss as single float\n - gradient with respect to weights W; an array of same shape as W\n \"\"\"\n\n # Initialise gradient and loss to zero\n dW = np.zeros(W.shape)\n loss = 0.0\n num_classes = W.shape[1]\n num_train = X.shape[0]\n\n # Compute loss and gradient\n for i in xrange(num_train):\t# For each image in training\n scores = X[i].dot(W)\t# Calculate scores, s = Wx\n correct_class_score = scores[y[i]]\n\n for j in xrange(num_classes):\t# For each class\n\n if j == y[i]:\t# No loss computed if correctly classified\n continue\n\n # Calculate margin, delta = 1\n margin = scores[j] - correct_class_score + 1\n\n if margin > 0:\n loss += margin\n dW[:,j] += X[i,:] \n dW[:,y[i]] -= X[i,:] \n\n # Average loss and gradient over batch\n loss /= num_train\n dW /= num_train\n\n # Add regularization to the loss and gradient\n loss += reg * np.sum(W * W)\n dW += 2*reg*W\n\n return loss, dW\n\n\ndef svm_loss_vectorized(W, X, y, reg):\n \"\"\"\n Structured SVM loss function, vectorized implementation.\n\n Inputs and outputs are the same as svm_loss_naive.\n \"\"\"\n\n # Initialize the gradient and loss as zero\n loss = 0.0\n dW = np.zeros(W.shape)\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n\n # Dimensions\n num_classes = W.shape[1]\n num_train = X.shape[0]\n\n scores = X.dot(W)\n correct_class_score = scores[range(num_train), list(y)].reshape(-1,1)\n\n margins = np.maximum(0, scores - correct_class_score + 1)\n\n # Zero-out margins associated with correct class scores\n margins[range(num_train), list(y)] = 0\t\n\n data_loss = np.sum(margins) / num_train\t# Sum margins & average over batch\n reg_loss = 0.5 * reg * np.sum(W * W)\n\n loss = data_loss + reg_loss\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n \n # Create coefficient matrix of size (500 X 10)\n # Determine which elements have margins < 0\n # Sum number of classes that didn't meet margin condition\n coeff_mat = np.zeros((num_train, num_classes))\n coeff_mat[margins > 0] = 1\n coeff_mat[range(num_train), list(y)] = -np.sum(coeff_mat, axis=1)\n\n # Multiply coefficient matrix with transpose of input\n # This selects inputs to be added to gradient matrix based on margin conditions\n dW = np.dot(X.T, coeff_mat)\n\n # Average gradient over batch\n dW /= num_train\n\n # Add regularization to the loss and gradient\n dW += 2*reg*W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW\n","sub_path":"assignment1/cs231n/classifiers/linear_svm.py","file_name":"linear_svm.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
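A quick way to build trust in both implementations is a centered numeric gradient check, in the spirit of the course's grad-check utilities (a sketch under that assumption, not part of the assignment files):

```python
import numpy as np

def numeric_gradient_check(loss_fn, W, X, y, reg, num_checks=5, h=1e-5):
    """Compare the analytic gradient of loss_fn against a centered difference."""
    _, dW = loss_fn(W, X, y, reg)
    for _ in range(num_checks):
        ix = tuple(np.random.randint(d) for d in W.shape)  # random weight entry
        W[ix] += h
        fxph = loss_fn(W, X, y, reg)[0]   # f(w + h)
        W[ix] -= 2 * h
        fxmh = loss_fn(W, X, y, reg)[0]   # f(w - h)
        W[ix] += h                        # restore the entry
        grad_numeric = (fxph - fxmh) / (2 * h)
        rel_error = abs(grad_numeric - dW[ix]) / max(1e-8, abs(grad_numeric) + abs(dW[ix]))
        print('numeric: %f analytic: %f, relative error: %e' % (grad_numeric, dW[ix], rel_error))
```

Relative errors around 1e-8 or smaller indicate the analytic gradient matches the loss.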
+{"seq_id":"30540710","text":"import sys\r\nimport smtplib\r\nimport datetime\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom MySQLCL import MySQLCL as MySQL\r\n\r\nclass Email(object):\r\n \"\"\"Used for defining functions for sending alert emails and reports\"\"\"\r\n\r\n accounts = MySQL.Get(\"SELECT ID, EmailAddress, Password, `Port`, DisplayAddress, MailServer FROM tweetalertaccounts WHERE Enabled = 1 LIMIT 1\")\r\n\r\n @staticmethod\r\n def SendMail(to, subject, message):\r\n fromaddr = Email.accounts[0][4]\r\n msg = MIMEMultipart()\r\n msg[\"From\"] = fromaddr\r\n msg[\"To\"] = to[0]\r\n msg[\"Subject\"] = subject\r\n body = message\r\n msg.attach(MIMEText(body, \"plain\"))\r\n text = msg.as_string()\r\n server = smtplib.SMTP(Email.accounts[0][5] + \":\" + str(Email.accounts[0][3]))\r\n server.ehlo()\r\n server.starttls()\r\n server.login(Email.accounts[0][1], Email.accounts[0][2])\r\n server.sendmail(fromaddr, to, text)\r\n server.quit()\r\n\r\n @staticmethod\r\n def SendAlertSummary():\r\n content = \"Errors in the past hour:\\n\"\r\n alerts = MySQL.Get(\"SELECT ID, Message, Timestamp, SentInEmail FROM tweetalerts WHERE SentInEmail = 0\")\r\n for alt in alerts:\r\n content += str(alt[2]) + \" - \" + str(alt[1]) + \"\\n\\n\"\r\n MySQL.Set(\"UPDATE tweetalerts SET SentInEmail = 1 WHERE ID = \" + str(alt[0]))\r\n Email.SendMail(\"redcameronapptest@gmail.com\", \"Alerts - \" + str(datetime.date.today()), content)","sub_path":"PythonScripts/Email.py","file_name":"Email.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"8983316","text":"import operator\nfrom functools import reduce\nfrom typing import List, Dict\n\nimport networkx as nx\nimport numpy as np\nfrom loguru import logger\nfrom tqdm import tqdm\n\nfrom recommenders.base_recommender import RecommenderBase\nfrom recommenders.pagerank.sparse_graph import SparseGraph\nfrom shared.meta import Meta\nfrom shared.user import WarmStartUser\nfrom shared.utility import get_combinations, hashable_lru\n\nRATING_CATEGORIES = {1, 0, -1}\n\n\ndef construct_collaborative_graph(graph: nx.Graph, training: Dict[int, WarmStartUser], rating_type=None):\n for user_id, user in training.items():\n user_id = f'user_{user_id}'\n graph.add_node(user_id, entity=False)\n\n for entity_idx, sentiment in user.training.items():\n # Skip don't knows if no rating type have been specified\n if rating_type is None and sentiment == 0:\n continue\n\n if rating_type is None or sentiment == rating_type:\n graph.add_node(entity_idx, entity=True)\n graph.add_edge(user_id, entity_idx, sentiment=sentiment)\n\n return graph\n\n\ndef construct_knowledge_graph(meta: Meta):\n graph = nx.Graph()\n\n for triple in meta.triples:\n if triple.head not in meta.uri_idx or triple.tail not in meta.uri_idx:\n logger.warning(f'Could not lookup triple data for {triple.head} or {triple.tail}')\n\n continue\n\n head = meta.uri_idx[triple.head]\n tail = meta.uri_idx[triple.tail]\n\n graph.add_node(head, entity=True)\n graph.add_node(tail, entity=True)\n graph.add_edge(head, tail, type=triple.relation)\n\n return graph\n\n\ndef get_cache_key(answers):\n return str(sorted(answers.items(), key=lambda x: x[0]))\n\n\nclass PageRankRecommender(RecommenderBase):\n def __init__(self, meta: Meta, ask_limit: int):\n super().__init__(meta)\n self.parameters = None\n\n self.entity_indices = set()\n self.sparse_graph = None\n\n # How many of the top-k entities we can ask about in validation\n self.ask_limit = ask_limit\n\n def clear_cache(self):\n self._get_scores.cache_clear()\n\n def construct_graph(self, training: Dict[int, WarmStartUser]):\n raise NotImplementedError()\n\n @staticmethod\n def _weight(category, ratings, importance):\n if not ratings[category] or not importance[category]:\n return 0\n\n return importance[category] / len(ratings[category])\n\n def get_node_weights(self, answers, importance):\n ratings = {category: set() for category in RATING_CATEGORIES}\n\n for entity_idx, sentiment in answers.items():\n ratings[sentiment].add(entity_idx)\n\n # Find rated and unrated entities\n rated_entities = reduce(lambda a, b: a.union(b), ratings.values())\n unrated_entities = self.sparse_graph.node_set.difference(rated_entities)\n\n # Treat unrated entities as unknown ratings\n ratings[0] = ratings[0].union(unrated_entities)\n\n # Compute the weight of each rating category\n rating_weight = {category: self._weight(category, ratings, importance) for category in RATING_CATEGORIES}\n\n # Assign weight to each node depending on their rating\n return {idx: rating_weight[category] for category in RATING_CATEGORIES for idx in ratings[category]}\n\n def fit(self, training: Dict[int, WarmStartUser]):\n for _, user in training.items():\n for entity in user.training.keys():\n self.entity_indices.add(entity)\n\n self.sparse_graph = SparseGraph(self.construct_graph(training))\n\n can_ask_about = set(self.meta.get_question_candidates(training, limit=self.ask_limit))\n logger.debug(f'Can ask about {len(can_ask_about)} entities')\n\n if not self.parameters:\n parameters = {\n 'alpha': np.arange(0.1, 1, 0.15),\n 
'importance': [\n {1: 0.95, 0: 0.05, -1: 0.0},\n {1: 0.85, 0: 0.15, -1: 0.0},\n ]\n }\n\n combinations = get_combinations(parameters)\n\n results = list()\n\n for combination in combinations:\n self.parameters = combination\n\n progress = tqdm(training.items())\n progress.set_description(str(combination))\n\n predictions = list()\n for _, user in progress:\n user_answers = {idx: rating for idx, rating in user.training.items() if idx in can_ask_about}\n prediction = self.predict(user.validation.to_list(), user_answers)\n\n predictions.append((user.validation, prediction))\n\n score = self.meta.validator.score(predictions, self.meta)\n results.append((combination, score))\n\n logger.info(f'Score: {score:.4f}')\n\n self.clear_cache()\n\n best_pair = sorted(results, key=operator.itemgetter(1), reverse=True)[0]\n self.parameters = best_pair[0]\n\n logger.info(f'Found optimal ({best_pair[1]:.4f}): {self.parameters}')\n\n @hashable_lru(maxsize=1024)\n def _get_scores(self, answers):\n return self.sparse_graph.scores(alpha=self.parameters['alpha'],\n personalization=self.get_node_weights(answers, self.parameters['importance']))\n\n def predict(self, items: List[int], answers: Dict[int, int]) -> Dict[int, float]:\n # Remove unknown answers\n answers = {idx: sentiment for idx, sentiment in answers.items() if sentiment}\n\n # Get scores for all entities\n all_scores = self._get_scores(answers)\n\n # Return only requested item scores\n return {item: all_scores.get(item, 0) for item in items}\n","sub_path":"recommenders/pagerank/pagerank_recommender.py","file_name":"pagerank_recommender.py","file_ext":"py","file_size_in_byte":5754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
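get_node_weights above splits each rating category's importance mass evenly across the entities that received that rating (the real method also folds unrated graph nodes into the 0 category first). A standalone toy run, with entity ids and importance values invented, makes the normalization concrete:

```python
importance = {1: 0.85, 0: 0.15, -1: 0.0}
ratings = {1: {7, 12}, 0: {3, 4, 5}, -1: set()}

weights = {}
for category, entities in ratings.items():
    if not entities or not importance[category]:
        continue  # mirrors _weight() returning 0
    share = importance[category] / len(entities)  # equal share per entity
    weights.update({idx: share for idx in entities})

print(weights)  # approximately {7: 0.425, 12: 0.425, 3: 0.05, 4: 0.05, 5: 0.05}
```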
+{"seq_id":"54763638","text":"\"\"\"\nThe presence of references is what keeps an object alive in memory.\nWhen the reference count of an object reaches zero, the garbage collector\ndisposes of it. But sometimes it is useful to have a reference to an object\nthat does not keep it around longer than necessary.\nA common use case is a cache. Weak references to an object do not increase\nits reference count. The object that is the target of a reference is called the\nreferent. Therefore, we say that a weak reference does not prevent the referent\nfrom being garbage collected. Weak references are useful in caching applications\nbecause you don’t want the cached objects to be kept alive just because they\nare referenced by the cache.\n\nIn other words, consider using WeakKeyDictionary, WeakValueDictionary, WeakSet\nand finalize (which use weak references internally) instead of creating and\nhandling your own weakref.ref instances by hand.\n\"\"\"\n\nimport weakref\n\n\ndef f1():\n \"\"\"\n >>> f1()\n False\n True\n \"\"\"\n a_set = {1, 2}\n wref = weakref.ref(a_set)\n print(wref() is None)\n\n a_set = {1, 2, 3}\n print(wref() is None)\n\n\nclass Cheese:\n \"\"\"\n The class WeakValueDictionary implements a mutable mapping where the values\n are weak references to objects. When a referred object is garbage collected\n elsewhere in the program, the corresponding key is automatically removed\n from WeakValueDictionary. This is commonly used for caching.\n\n A counterpart to the WeakValueDictionary is the WeakKeyDictionary\n in which the keys are weak references.\n\n [A WeakKeyDictionary ] can be used to associate additional data with an\n object owned by other parts of an application without adding attributes\n to those objects. This can be especially useful with objects that override\n attribute accesses.\n\n The weakref module also provides a WeakSet, simply described in the docs as\n “Set class that keeps weak references to its elements. An element will be\n discarded when no strong reference to it exists any more.”\n If you need to build a class that is aware of every one of its instances,\n a good solution is to create a class attribute with a WeakSet to hold the\n references to the instances. Otherwise, if a regular set was used,\n the instances would never be garbage collected, because the class itself\n would have strong references to them, and classes live as long as the\n Python process unless you deliberately delete them.\n \"\"\"\n def __init__(self, kind):\n self.kind = kind\n\n def __repr__(self):\n return 'Cheese(%r)' % self.kind\n\n\ndef stock_cheese():\n \"\"\"\n >>> stock_cheese()\n ['Ementaler', 'Red Leicester']\n ['Ementaler']\n []\n \"\"\"\n stock = weakref.WeakValueDictionary()\n catalog = [Cheese('Red Leicester'), Cheese('Ementaler')]\n\n for cheese in catalog:\n stock[cheese.kind] = cheese\n\n print(sorted(stock.keys()))\n\n del catalog\n print(sorted(stock.keys()))\n\n del cheese\n print(sorted(stock.keys()))\n\n\nclass MyList(list):\n \"\"\"\n Basic list and dict instances may not be referents, but a plain subclass\n of either can solve this problem easily.\n\n >>> a_list = MyList(range(10))\n >>> wref_to_a_list = weakref.ref(a_list)\n\n But int and tuple instances cannot be targets of weak references,\n even if subclasses of those types are created.\n\n Most of these limitations are implementation details of CPython that may\n not apply to other Python interpreters. 
They are the result of internal\n optimizations.\n \"\"\"\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(verbose=True, optionflags=doctest.ELLIPSIS)\n","sub_path":"code_utils/weak_references.py","file_name":"weak_references.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
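The WeakSet instance-tracking pattern that the Cheese docstring describes, as a minimal self-contained sketch:

```python
import weakref

class TrackedCheese:
    # Class-level WeakSet: knows every live instance without keeping any alive.
    instances = weakref.WeakSet()

    def __init__(self, kind):
        self.kind = kind
        TrackedCheese.instances.add(self)

a = TrackedCheese('Brie')
b = TrackedCheese('Gouda')
print(len(TrackedCheese.instances))  # 2
del a
print(len(TrackedCheese.instances))  # 1 in CPython, where collection is immediate
```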
+{"seq_id":"540331587","text":"# pylint: disable=E1101\n\nfrom typing import List\nfrom mongoengine.queryset.visitor import Q\n\nfrom src.data.interfaces import StoreRepositoryInterface\nfrom src.domain.models import Store\nfrom src.infra.config import DBConnectionHandler\nfrom src.infra.entities import Store as StoreEntity\n\n\nclass StoreRepository(StoreRepositoryInterface):\n \"\"\" Class to manage Store Repository \"\"\"\n\n @classmethod\n def search_by_name_or_typestore(cls, name: str = None, typestore: str = None) -> List[Store]:\n # \"\"\"\n # Search data by name in StoreEntity\n # :param - name: name of the store\n # :return - list of store\n # \"\"\"\n\n with DBConnectionHandler():\n try:\n filters = None\n if typestore:\n filters = Q(typestore=typestore)\n if name:\n filters = filters & Q(name__icontains=name) if filters else Q(name__icontains=name)\n\n if filters:\n stores = StoreEntity.objects.filter(filters).order_by('name')[:50]\n else:\n stores = StoreEntity.objects.order_by('name')[:50]\n\n list_of_stores = []\n for store in stores:\n list_of_stores.append(Store(\n id=store.id,\n name=store.name,\n typestore=store.typestore,\n typestorename=store.typestorename\n ))\n return list_of_stores\n except:\n ...\n\n return []\n","sub_path":"src/infra/repositories/store_repository.py","file_name":"store_repository.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"288217410","text":"# We want to turn the given integer into a number that has only one non-zero digit using a tail rounding approach.\n# This means that at each step we take the last non 0 digit of the number and round it to 0 or to 10.\n# If it's less than 5 we round it to 0 if it's larger than or equal to 5 we round it to 10 (rounding to 10 means increasing the next significant digit by 1).\n# The process stops immediately once there is only one non-zero digit left.\n\n# + Examples\n\n# - For value = 15, the output should be\n# > rounders(value) = 20;\n# - For value = 1234, the output should be\n# > rounders(value) = 1000.\n# 1234 -> 1230 -> 1200 -> 1000.\n# - For value = 1445, the output should be\n# > rounders(value) = 2000.\n# 1445 -> 1450 -> 1500 -> 2000.\n\n# + Input/Output\n\n# - [execution time limit] 4 seconds (py3)\n# - [input] integer value\n# A positive integer.\n# Guaranteed constraints:\n# 1 ≤ value ≤ 108.\n# - [output] integer\n# The rounded number.\n\n# + Solutions\n\nfrom math import floor, log10\n\n# - 12/12\n\n\ndef rounders(n):\n for f in range(1, floor(log10(n)) + 1):\n n = round(n / (10 ** f) + 0.01) * 10 ** f\n\n return n\n\n\nprint(rounders(15))\n# > 20\n\nprint(rounders(1234))\n# > 1000\n\nprint(rounders(2000))\n# > 2000\n","sub_path":"code-fights/arcade/the-core/4-loop-tunnel/32-rounders.py","file_name":"32-rounders.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"325943751","text":"#!/usr/bin/python3\n\ndef trim_whitespace(s):\n \"\"\"Deletes whitespace from the beginning and end of a string\"\"\"\n\n while s[0] == ' ':\n s = s[1:]\n\n # array index of last char\n s_end = s.__len__() - 1\n while s[s_end] == ' ':\n s = s[:s_end]\n s_end -= 1\n\n return s\n\ndef create_file(name):\n text = \"#!/usr/bin/python3\"\n fout = open(name + '.py', 'w')\n fout.write(text)\n\nprint(\"Enter a filename to create as a Python 3 program.\")\nprint(\"To quit, enter nothing.\")\nwhile True:\n raw_name = input(\">> \")\n if raw_name == \"\":\n break\n else:\n name = trim_whitespace(raw_name)\n create_file(name)\n print(name)\n","sub_path":"startPy3.py","file_name":"startPy3.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"475475251","text":"\"\"\" Defines the prior and likelihood functions.\n\nThis file contains the functions for the prior and likelihood\nfunctions needed to calculate the posterior distribution over\nthe cosmological parameters.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\ndef prior(params, magnitude_mode):\n\n \"\"\" This function calculates the prior.\n\n Parameter:\n 1. params - This is a list of the 4 cosmological parameters\n omega_m, omega_h, H_0 and M.\n 2. magnitude_mode - This parameter decides whether the prior\n on M (absolute magnitude) is 'uniform' or 'gaussian'.\n\n omega_m, omega_h and H_0 cannot take negative values therefore\n we choose the prior probability to be 0 when at least one of \n the three parameters goes negative. For any non-negative values\n of omega_m, omega_h, and H_0, we choose a uniform prior probabiility.\n \"\"\"\n\n # Prior is 0 if omega_m,omega_h, H_0 are negative.\n\n if any(i < 0 for i in params[0:-1]):\n return 0\n\n # Uniform prior on M\n\n if magnitude_mode == \"uniform\":\n return 1\n\n # Gaussian prior on corrected supernova absolute magnitude of M =19.23 +/- 0.042.\n\n else:\n return np.exp(-0.5 * pow((params[3] - 19.23) / 0.042, 2))\n\n\ndef likelihood(params, mu_model, data_lcparam, sys_error, include_sys_error):\n\n \"\"\"This function calculates the likelihood.\n\n Parameter:\n 1. params - This is a list of the 4 cosmological parameters\n omega_m, omega_lambda, H_0 and M.\n 2. mu_model - Input the mu_model function that analytically\n caculates mu. (Not needed if this is a part of the same file\n that defines the function mu_model.)\n 2. data_lcparam - Importing the data file that contains the\n redshift, aparent magnitude and the statistical error data.\n 3. sys_error - This is a 40x40 matrix that contains the \n systematic error data.\n 4. include_sys_error - This parameter decides whether the\n systematic error is included (value -> 'True') or excluded \n (value -> 'False') from the covariance matrix calculation.\n \"\"\"\n\n # Importing an array of size 40 that contains the apparent\n # magnitude data.\n\n app_mag = pd.Series.to_numpy(data_lcparam.mb)\n\n # Calculating the difference between the measured and\n # estimated aparent magnitude.\n\n diff_app_mag = app_mag - (mu_model(params, data_lcparam) - params[3])\n\n # Defining a 40x40 diagonal matrix whose diagonal entries\n # are the square of the corresponding statistical error.\n\n stat_error = np.diag(pow(pd.Series.to_numpy(data_lcparam.dmb), 2))\n\n # Only include the statistical error in the covariance\n # matrix calculation\n\n if include_sys_error == \"False\":\n inv_cov_matrix = np.linalg.inv(stat_error)\n\n # Include the systematic error as well in the covariance\n # matrix calculation.\n\n else:\n inv_cov_matrix = np.linalg.inv(stat_error + sys_error)\n\n return np.exp(-0.5 * (diff_app_mag @ inv_cov_matrix @ diff_app_mag))\n\n\n# data_lcparam = pd.read_csv(\"lcparam_DS17f.txt\", sep=\" \")\n# data_sys = pd.read_csv(\"sys_DS17f.txt\", sep=\" \")\n# data_sys.columns = [\"sys_error\"]\n# sys_error = np.reshape(pd.Series.to_numpy(data_sys.sys_error), (40, 40))\n# added a new line here\n","sub_path":"prior_likelihood.py","file_name":"prior_likelihood.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"343184256","text":"from .base import Base\n\nOTHER = \"Objects.Other.\"\n\n\nclass RenderMaterial(Base, speckle_type=OTHER + \"RenderMaterial\"):\n name: str = None\n opacity: float = 1\n metalness: float = 0\n roughness: float = 1\n diffuse: int = -2894893 # light gray arbg\n emissive: int = -16777216 # black arbg\n","sub_path":"specklepy/objects/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"544300855","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : patarajarina\nDate : 2019-02-26\nPurpose: Rock the Casbah\n\"\"\"\nimport os\nimport argparse\nimport sys\nfrom Bio import SeqIO\nimport collections\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"get command-line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Argparse Python script',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n 'fasta', metavar='FILE', help='FASTA file(s)', nargs='+')\n\n parser.add_argument(\n '-o',\n '--outdir',\n help='Out dir',\n metavar='DIR',\n type=str,\n default='out')\n\n parser.add_argument(\n '-p',\n '--pct_gc',\n help='A named integer argument',\n metavar='int',\n type=int,\n default=50)\n\n parser.add_argument(\n '-f', '--flag', help='A boolean flag', action='store_true')\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef warn(msg):\n \"\"\"Print a message to STDERR\"\"\"\n print(msg, file=sys.stderr)\n\n\n# --------------------------------------------------\ndef die(msg='Something bad happened'):\n \"\"\"warn() and exit with error\"\"\"\n warn(msg)\n sys.exit(1)\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n args = get_args()\n fasta= args.fasta\n pct_gc = args.pct_gc\n # = args.flag\n outdir = args.outdir\n seqspl=0\n\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n\n if not pct_gc in range(1,101):\n die('--pct_gc \"{}\" must be between 0 and 100'.format(pct_gc)) \n\n for n, file in enumerate(fasta,1):\n if not os.path.isfile(file):\n warn('\"{}\" is not a file'.format(file))\n continue\n print(\"{}: {}\".format(n, file))\n base_name, ext = os.path.splitext(os.path.basename(file))\n lowf = os.path.join(outdir,base_name+'_low'+ext)\n highf = os.path.join(outdir, base_name+'_high'+ext)\n lowfhandle = open(lowf,'wt')\n highfhandle = open(highf, 'wt')\n\n# print(file)\n \n for record in SeqIO.parse(file, 'fasta'):\n #lengths.append(len(record.seq))\n seq_len=len(record.seq)\n nucs=collections.Counter(record.seq) #collections.Counter\n gc_num=nucs.get('G',0)+nucs.get('C',0)\n# print(record.seq)\n gcp=int(gc_num/seq_len * 100)\n if gcp < pct_gc:\n SeqIO.write(record, lowfhandle, 'fasta')\n else:\n SeqIO.write(record, highfhandle, 'fasta')\n seqspl=seqspl+1\n print('Done, wrote {} sequences to out dir \"{}\"'.format(seqspl, outdir)) \n# print(gc)\n# print('HIGH')\n# print('str_arg = \"{}\"'.format(str_arg))\n# print('int_arg = \"{}\"'.format(int_arg))\n# print('flag_arg = \"{}\"'.format(flag_arg))\n# print('positional = \"{}\"'.format(pos_arg))\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/06-fasta-gc/gc.py","file_name":"gc.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"107018885","text":"import datetime\n\nfrom django.contrib.postgres.aggregates import StringAgg\nfrom django.db.models import Case, CharField, Max, OuterRef, Subquery, Sum, When, Func, F, Value\nfrom django.db.models.functions import Concat, Coalesce\n\nfrom usaspending_api.accounts.helpers import start_and_end_dates_from_fyq\nfrom usaspending_api.accounts.models import FederalAccount\nfrom usaspending_api.awards.v2.lookups.lookups import contract_type_mapping\nfrom usaspending_api.common.exceptions import InvalidParameterException\nfrom usaspending_api.common.helpers.orm_helpers import FiscalYearAndQuarter, FiscalYear\nfrom usaspending_api.download.filestreaming import NAMING_CONFLICT_DISCRIMINATOR\nfrom usaspending_api.download.v2.download_column_historical_lookups import query_paths\nfrom usaspending_api.references.models import CGAC, ToptierAgency\nfrom usaspending_api.settings import HOST\n\nAWARD_URL = f\"{HOST}/#/award/\" if \"localhost\" in HOST else f\"https://{HOST}/#/award/\"\n\n\"\"\"\nAccount Download Logic\n\nAccount Balances (A file):\n - Treasury Account\n 1. Get all TASs matching the filers from Q1 to the FSQ selected\n 2. Only include the most recently submitted TASs (uniqueness based on TAS)\n - Federal Account\n 1. Get all TASs matching the filers from Q1 to the FSQ selected\n 2. Only include the most recently submitted TASs (uniqueness based on TAS)\n 3. Group by Federal Accounts\nAccount Breakdown by Program Activity & Object Class (B file):\n - Treasury Account\n 1. Get all TASs matching the filers from Q1 to the FSQ selected\n 2. Only include the most recently submitted TASs (uniqueness based on TAS/PA/OC/DR)\n - Federal Account\n 1. Get all TASs matching the filers from Q1 to the FSQ selected\n 2. Only include the most recently submitted TASs (uniqueness based on TAS/PA/OC/DR)\n 3. Group by Federal Accounts\nAccount Breakdown by Award (C file):\n - Treasury Account\n 1. Get all TASs matching the filers from Q1 to the FSQ selected\n - Federal Account\n 1. Get all TASs matching the filers from Q1 to the FSQ selected\n 2. 
Group by Federal Accounts\n\"\"\"\n\n\ndef account_download_filter(account_type, download_table, filters, account_level=\"treasury_account\"):\n query_filters = {}\n tas_id = \"treasury_account_identifier\" if account_type == \"account_balances\" else \"treasury_account\"\n\n # Filter by Agency, if provided\n if filters.get(\"agency\", False) and filters[\"agency\"] != \"all\":\n agency = ToptierAgency.objects.filter(toptier_agency_id=filters[\"agency\"]).first()\n if not agency:\n raise InvalidParameterException(\"Agency with that ID does not exist\")\n query_filters[f\"{tas_id}__funding_toptier_agency_id\"] = agency.toptier_agency_id\n\n # Filter by Federal Account, if provided\n if filters.get(\"federal_account\", False) and filters[\"federal_account\"] != \"all\":\n federal_account_obj = FederalAccount.objects.filter(id=filters[\"federal_account\"]).first()\n if federal_account_obj:\n query_filters[f\"{tas_id}__federal_account__id\"] = filters[\"federal_account\"]\n else:\n raise InvalidParameterException(\"Federal Account with that ID does not exist\")\n\n # Filter by Budget Function, if provided\n if filters.get(\"budget_function\", False) and filters[\"budget_function\"] != \"all\":\n query_filters[f\"{tas_id}__budget_function_code\"] = filters[\"budget_function\"]\n\n # Filter by Budget SubFunction, if provided\n if filters.get(\"budget_subfunction\", False) and filters[\"budget_subfunction\"] != \"all\":\n query_filters[f\"{tas_id}__budget_subfunction_code\"] = filters[\"budget_subfunction\"]\n\n # Filter by Fiscal Year and Quarter\n reporting_period_start, reporting_period_end, start_date, end_date = retrieve_fyq_filters(\n account_type, account_level, filters\n )\n query_filters[reporting_period_start] = start_date\n query_filters[reporting_period_end] = end_date\n\n # Create the base queryset\n queryset = download_table.objects\n\n if account_type in [\"account_balances\", \"object_class_program_activity\"]:\n # only include the latest TASs, not all of them\n unique_id_mapping = {\n \"account_balances\": \"appropriation_account_balances_id\",\n \"object_class_program_activity\": \"financial_accounts_by_program_activity_object_class_id\",\n }\n unique_columns_mapping = {\n \"account_balances\": [\"treasury_account_identifier__tas_rendering_label\"],\n \"object_class_program_activity\": [\n \"treasury_account__tas_rendering_label\",\n \"program_activity__program_activity_code\",\n \"object_class__object_class\",\n \"object_class__direct_reimbursable\",\n ],\n }\n distinct_cols = unique_columns_mapping[account_type]\n order_by_cols = distinct_cols + [\"-reporting_period_start\", \"-pk\"]\n latest_ids_q = (\n download_table.objects.filter(**query_filters)\n .distinct(*distinct_cols)\n .order_by(*order_by_cols)\n .values(unique_id_mapping[account_type])\n )\n if latest_ids_q.exists():\n query_filters[f\"{unique_id_mapping[account_type]}__in\"] = latest_ids_q\n\n # Make derivations based on the account level\n if account_level == \"treasury_account\":\n queryset = generate_treasury_account_query(queryset, account_type, tas_id)\n elif account_level == \"federal_account\":\n queryset = generate_federal_account_query(queryset, account_type, tas_id)\n else:\n raise InvalidParameterException(\n 'Invalid Parameter: account_level must be either \"federal_account\" or ' '\"treasury_account\"'\n )\n\n # Apply filter and return\n return queryset.filter(**query_filters)\n\n\ndef get_agency_name_annotation(relation_name: str, cgac_column_name: str) -> Subquery:\n \"\"\"\n Accepts the Django 
foreign key relation name for the outer queryset to TreasuryAppropriationAccount\n or FederalAccount join and the CGAC column name and returns an annotation ready Subquery object that\n retrieves the CGAC agency name.\n \"\"\"\n outer_ref = f\"{relation_name}__{cgac_column_name}\"\n return Subquery(CGAC.objects.filter(cgac_code=OuterRef(outer_ref)).values(\"agency_name\"))\n\n\ndef generate_treasury_account_query(queryset, account_type, tas_id):\n \"\"\" Derive necessary fields for a treasury account-grouped query \"\"\"\n derived_fields = {\n \"last_reported_submission_period\": FiscalYearAndQuarter(\"reporting_period_end\"),\n # treasury_account_symbol: [ATA-]AID-BPOA/EPOA-MAC-SAC or [ATA-]AID-\"X\"-MAC-SAC\n \"treasury_account_symbol\": Concat(\n Case(\n When(\n **{\n f\"{tas_id}__allocation_transfer_agency_id__isnull\": False,\n \"then\": Concat(f\"{tas_id}__allocation_transfer_agency_id\", Value(\"-\")),\n }\n ),\n default=Value(\"\"),\n output_field=CharField(),\n ),\n f\"{tas_id}__agency_id\",\n Value(\"-\"),\n Case(\n When(**{f\"{tas_id}__availability_type_code\": \"X\", \"then\": Value(\"X\")}),\n default=Concat(\n f\"{tas_id}__beginning_period_of_availability\",\n Value(\"/\"),\n f\"{tas_id}__ending_period_of_availability\",\n ),\n output_field=CharField(),\n ),\n Value(\"-\"),\n f\"{tas_id}__main_account_code\",\n Value(\"-\"),\n f\"{tas_id}__sub_account_code\",\n output_field=CharField(),\n ),\n \"allocation_transfer_agency_identifer_name\": get_agency_name_annotation(\n tas_id, \"allocation_transfer_agency_id\"\n ),\n \"agency_identifier_name\": get_agency_name_annotation(tas_id, \"agency_id\"),\n # federal_account_symbol: fed_acct_AID-fed_acct_MAC\n \"federal_account_symbol\": Concat(\n f\"{tas_id}__federal_account__agency_identifier\",\n Value(\"-\"),\n f\"{tas_id}__federal_account__main_account_code\",\n ),\n \"submission_period\": FiscalYearAndQuarter(\"reporting_period_end\"),\n }\n\n # Derive recipient_parent_name\n if account_type == \"award_financial\":\n derived_fields = award_financial_derivations(derived_fields)\n\n return queryset.annotate(**derived_fields)\n\n\ndef generate_federal_account_query(queryset, account_type, tas_id):\n \"\"\" Group by federal account (and budget function/subfunction) and SUM all other fields \"\"\"\n derived_fields = {\n \"reporting_agency_name\": StringAgg(\"submission__reporting_agency_name\", \"; \", distinct=True),\n \"budget_function\": StringAgg(f\"{tas_id}__budget_function_title\", \"; \", distinct=True),\n \"budget_subfunction\": StringAgg(f\"{tas_id}__budget_subfunction_title\", \"; \", distinct=True),\n \"last_reported_submission_period\": Max(FiscalYearAndQuarter(\"reporting_period_end\")),\n # federal_account_symbol: fed_acct_AID-fed_acct_MAC\n \"federal_account_symbol\": Concat(\n f\"{tas_id}__federal_account__agency_identifier\",\n Value(\"-\"),\n f\"{tas_id}__federal_account__main_account_code\",\n ),\n \"agency_identifier_name\": get_agency_name_annotation(tas_id, \"agency_id\"),\n \"submission_period\": FiscalYearAndQuarter(\"reporting_period_end\"),\n \"last_modified_date\" + NAMING_CONFLICT_DISCRIMINATOR: Max(\"submission__certified_date\"),\n }\n\n # Derive recipient_parent_name for award_financial downloads\n if account_type == \"award_financial\":\n derived_fields = award_financial_derivations(derived_fields)\n\n queryset = queryset.annotate(**derived_fields)\n\n # List of all columns that may appear in A, B, or C files that can be summed\n all_summed_cols = [\n 
\"budget_authority_unobligated_balance_brought_forward\",\n \"adjustments_to_unobligated_balance_brought_forward\",\n \"budget_authority_appropriated_amount\",\n \"borrowing_authority_amount\",\n \"contract_authority_amount\",\n \"spending_authority_from_offsetting_collections_amount\",\n \"total_other_budgetary_resources_amount\",\n \"total_budgetary_resources\",\n \"obligations_incurred\",\n \"deobligations_or_recoveries_or_refunds_from_prior_year\",\n \"unobligated_balance\",\n \"gross_outlay_amount\",\n \"status_of_budgetary_resources_total\",\n \"transaction_obligated_amount\",\n ]\n\n # Group by all columns within the file that can't be summed\n fed_acct_values_dict = query_paths[account_type][\"federal_account\"]\n grouped_cols = [fed_acct_values_dict[val] for val in fed_acct_values_dict if val not in all_summed_cols]\n queryset = queryset.values(*grouped_cols)\n\n # Sum all fields from all_summed_cols that appear in this file\n values_dict = query_paths[account_type]\n summed_cols = {\n val: Sum(values_dict[\"treasury_account\"].get(val, None))\n for val in values_dict[\"federal_account\"]\n if val in all_summed_cols\n }\n\n return queryset.annotate(**summed_cols)\n\n\ndef retrieve_fyq_filters(account_type, account_level, filters):\n \"\"\" Apply a filter by Fiscal Year and Quarter \"\"\"\n if filters.get(\"fy\", False) and filters.get(\"quarter\", False):\n start_date, end_date = start_and_end_dates_from_fyq(filters[\"fy\"], filters[\"quarter\"])\n\n # For all files, filter up to and including the FYQ\n reporting_period_start = \"reporting_period_start__gte\"\n reporting_period_end = \"reporting_period_end__lte\"\n if str(filters[\"quarter\"]) != \"1\":\n start_date = datetime.date(filters[\"fy\"] - 1, 10, 1)\n else:\n raise InvalidParameterException(\"fy and quarter are required parameters\")\n\n return reporting_period_start, reporting_period_end, start_date, end_date\n\n\ndef award_financial_derivations(derived_fields):\n derived_fields[\"recipient_parent_name\"] = Case(\n When(\n award__latest_transaction__type__in=list(contract_type_mapping.keys()),\n then=\"award__latest_transaction__contract_data__ultimate_parent_legal_enti\",\n ),\n default=\"award__latest_transaction__assistance_data__ultimate_parent_legal_enti\",\n output_field=CharField(),\n )\n derived_fields[\"award_type_code\"] = Coalesce(\n \"award__latest_transaction__contract_data__contract_award_type\",\n \"award__latest_transaction__assistance_data__assistance_type\",\n )\n derived_fields[\"award_type\"] = Coalesce(\n \"award__latest_transaction__contract_data__contract_award_type_desc\",\n \"award__latest_transaction__assistance_data__assistance_type_desc\",\n )\n derived_fields[\"awarding_agency_code\"] = Coalesce(\n \"award__latest_transaction__contract_data__awarding_agency_code\",\n \"award__latest_transaction__assistance_data__awarding_agency_code\",\n )\n derived_fields[\"awarding_agency_name\"] = Coalesce(\n \"award__latest_transaction__contract_data__awarding_agency_name\",\n \"award__latest_transaction__assistance_data__awarding_agency_name\",\n )\n derived_fields[\"awarding_subagency_code\"] = Coalesce(\n \"award__latest_transaction__contract_data__awarding_sub_tier_agency_c\",\n \"award__latest_transaction__assistance_data__awarding_sub_tier_agency_c\",\n )\n derived_fields[\"awarding_subagency_name\"] = Coalesce(\n \"award__latest_transaction__contract_data__awarding_sub_tier_agency_n\",\n \"award__latest_transaction__assistance_data__awarding_sub_tier_agency_n\",\n )\n 
derived_fields[\"awarding_office_code\"] = Coalesce(\n \"award__latest_transaction__contract_data__awarding_office_code\",\n \"award__latest_transaction__assistance_data__awarding_office_code\",\n )\n derived_fields[\"awarding_office_name\"] = Coalesce(\n \"award__latest_transaction__contract_data__awarding_office_name\",\n \"award__latest_transaction__assistance_data__awarding_office_name\",\n )\n derived_fields[\"funding_agency_code\"] = Coalesce(\n \"award__latest_transaction__contract_data__funding_agency_code\",\n \"award__latest_transaction__assistance_data__funding_agency_code\",\n )\n derived_fields[\"funding_agency_name\"] = Coalesce(\n \"award__latest_transaction__contract_data__funding_agency_name\",\n \"award__latest_transaction__assistance_data__funding_agency_name\",\n )\n derived_fields[\"funding_sub_agency_code\"] = Coalesce(\n \"award__latest_transaction__contract_data__funding_sub_tier_agency_co\",\n \"award__latest_transaction__assistance_data__funding_sub_tier_agency_co\",\n )\n derived_fields[\"funding_sub_agency_name\"] = Coalesce(\n \"award__latest_transaction__contract_data__funding_sub_tier_agency_na\",\n \"award__latest_transaction__assistance_data__funding_sub_tier_agency_na\",\n )\n derived_fields[\"funding_office_code\"] = Coalesce(\n \"award__latest_transaction__contract_data__funding_office_code\",\n \"award__latest_transaction__assistance_data__funding_office_code\",\n )\n derived_fields[\"funding_office_name\"] = Coalesce(\n \"award__latest_transaction__contract_data__funding_office_name\",\n \"award__latest_transaction__assistance_data__funding_office_name\",\n )\n derived_fields[\"recipient_duns\"] = Coalesce(\n \"award__latest_transaction__contract_data__awardee_or_recipient_uniqu\",\n \"award__latest_transaction__assistance_data__awardee_or_recipient_uniqu\",\n )\n derived_fields[\"recipient_name\"] = Coalesce(\n \"award__latest_transaction__contract_data__awardee_or_recipient_legal\",\n \"award__latest_transaction__assistance_data__awardee_or_recipient_legal\",\n )\n derived_fields[\"recipient_parent_duns\"] = Coalesce(\n \"award__latest_transaction__contract_data__ultimate_parent_unique_ide\",\n \"award__latest_transaction__assistance_data__ultimate_parent_unique_ide\",\n )\n derived_fields[\"recipient_parent_name\"] = Coalesce(\n \"award__latest_transaction__contract_data__ultimate_parent_legal_enti\",\n \"award__latest_transaction__assistance_data__ultimate_parent_legal_enti\",\n )\n derived_fields[\"recipient_country\"] = Coalesce(\n \"award__latest_transaction__contract_data__legal_entity_country_code\",\n \"award__latest_transaction__assistance_data__legal_entity_country_code\",\n )\n derived_fields[\"recipient_state\"] = Coalesce(\n \"award__latest_transaction__contract_data__legal_entity_state_code\",\n \"award__latest_transaction__assistance_data__legal_entity_state_code\",\n )\n derived_fields[\"recipient_county\"] = Coalesce(\n \"award__latest_transaction__contract_data__legal_entity_county_name\",\n \"award__latest_transaction__assistance_data__legal_entity_county_name\",\n )\n derived_fields[\"recipient_city\"] = Coalesce(\n \"award__latest_transaction__contract_data__legal_entity_city_name\",\n \"award__latest_transaction__assistance_data__legal_entity_city_name\",\n )\n derived_fields[\"recipient_congressional_district\"] = Coalesce(\n \"award__latest_transaction__contract_data__legal_entity_congressional\",\n \"award__latest_transaction__assistance_data__legal_entity_congressional\",\n )\n derived_fields[\"recipient_zip_code\"] = 
Coalesce(\n \"award__latest_transaction__contract_data__legal_entity_zip4\",\n Concat(\n \"award__latest_transaction__assistance_data__legal_entity_zip5\",\n \"award__latest_transaction__assistance_data__legal_entity_zip_last4\",\n ),\n )\n derived_fields[\"primary_place_of_performance_country\"] = Coalesce(\n \"award__latest_transaction__contract_data__place_of_perf_country_desc\",\n \"award__latest_transaction__assistance_data__place_of_perform_country_n\",\n )\n derived_fields[\"primary_place_of_performance_state\"] = Coalesce(\n \"award__latest_transaction__contract_data__place_of_perfor_state_desc\",\n \"award__latest_transaction__assistance_data__place_of_perform_state_nam\",\n )\n derived_fields[\"primary_place_of_performance_county\"] = Coalesce(\n \"award__latest_transaction__contract_data__place_of_perform_county_na\",\n \"award__latest_transaction__assistance_data__place_of_perform_county_na\",\n )\n derived_fields[\"primary_place_of_performance_congressional_district\"] = Coalesce(\n \"award__latest_transaction__contract_data__place_of_performance_congr\",\n \"award__latest_transaction__assistance_data__place_of_performance_congr\",\n )\n derived_fields[\"primary_place_of_performance_zip_code\"] = Coalesce(\n \"award__latest_transaction__contract_data__place_of_performance_zip4a\",\n \"award__latest_transaction__assistance_data__place_of_performance_zip4a\",\n )\n derived_fields[\"award_base_action_date_fiscal_year\"] = FiscalYear(\"award__date_signed\")\n\n derived_fields[\"usaspending_permalink\"] = Case(\n When(\n **{\n \"award__generated_unique_award_id__isnull\": False,\n \"then\": Concat(\n Value(AWARD_URL), Func(F(\"award__generated_unique_award_id\"), function=\"urlencode\"), Value(\"/\")\n ),\n }\n ),\n default=Value(\"\"),\n output_field=CharField(),\n )\n\n return derived_fields\n","sub_path":"usaspending_api/accounts/v2/filters/account_download.py","file_name":"account_download.py","file_ext":"py","file_size_in_byte":19360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
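The "most recently submitted" selection described in the header docstring relies on Postgres DISTINCT ON: ordering by the uniqueness columns first and then newest-first makes the first row of each group the latest one. Stripped to its core for the account_balances case (column names taken from the mappings above, surrounding context illustrative):

```python
latest_ids = (
    download_table.objects.filter(**query_filters)
    .distinct("treasury_account_identifier__tas_rendering_label")
    .order_by("treasury_account_identifier__tas_rendering_label",
              "-reporting_period_start", "-pk")
    .values("appropriation_account_balances_id")
)
# Restrict the download queryset to those latest rows only.
query_filters["appropriation_account_balances_id__in"] = latest_ids
```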
+{"seq_id":"1244417","text":"import json\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom apps.hello.models import MyHttpRequest, UserProfile, Task\nfrom apps.hello.forms import EditForm, TaskForm\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import CreateView, ListView, UpdateView\nimport signals # noqa\n\n\ndef index_view(request, template='hello/index.html'):\n data = UserProfile.objects.first()\n return render(request, template, {'data': data})\n\n\ndef request_view(request, template='hello/request.html'):\n return render(request, template)\n\n\ndef get_requests(request):\n \"\"\" return 10 last Http Requests from DB and not viewed count\"\"\"\n ten_requests = MyHttpRequest.objects.all()[:10]\n data = dict(\n count=MyHttpRequest.objects.filter(is_viewed=False).count(),\n text=render_to_string('hello/table_for_requests.html',\n dict(requests=ten_requests))\n )\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\n@login_required()\ndef edit_page(request):\n if request.method == 'POST' and request.is_ajax():\n instance = UserProfile.objects.first()\n form = EditForm(request.POST, request.FILES, instance=instance)\n\n if form.is_valid():\n if request.FILES:\n instance.avatar = request.FILES['avatar']\n\n form.save()\n return HttpResponse(json.dumps(dict(success=True)),\n content_type='application/json')\n return HttpResponseBadRequest(\n json.dumps(dict(success=False, err_msg=form.errors)),\n content_type='application/json')\n\n form = EditForm(instance=UserProfile.objects.first())\n return render(request, 'hello/edit.html', {'form': form})\n\n\ndef update_requests(request):\n MyHttpRequest.objects.update(is_viewed=True)\n return HttpResponse()\n\n\nclass TaskListView(ListView):\n context_object_name = \"task_list\"\n template_name = 'hello/task.html'\n\n def get_queryset(self):\n return Task.objects.filter(user=self.request.user)\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(TaskListView, self).dispatch(*args, **kwargs)\n\n\nclass TaskCreateView(CreateView):\n model = Task\n form_class = TaskForm\n template_name = 'hello/task_create.html'\n success_url = '/task/'\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super(TaskCreateView, self).form_valid(form)\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(TaskCreateView, self).dispatch(*args, **kwargs)\n\n\nclass TaskUpdateView(UpdateView):\n model = Task\n form_class = TaskForm\n template_name = 'hello/task_create.html'\n success_url = '/task/'\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(TaskUpdateView, self).dispatch(*args, **kwargs)\n\n\ndef task_sort(request):\n if request.method == 'POST' and request.is_ajax():\n task_position_dicts = json.loads(request.body)\n for task_position_dict in task_position_dicts:\n task = Task.objects.get(id=task_position_dict['key'])\n task.position = task_position_dict['value']\n task.save()\n return HttpResponse(json.dumps(dict(success=True)),\n content_type='application/json')\n return HttpResponseBadRequest()\n","sub_path":"apps/hello/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"316468203","text":"import uncertainties\nfrom uncertainties import ufloat\nimport numpy as np\n\ndiametro_esferas = [2.49, 2.99, 3.49, 3.95] #mm\nu_d = [0.002041241452319, 0.002041241452319, 0.002041241452319, 0.002041241452319] #mm\nd = [] #mm\nr = [] #mm\nfor i in range ( len(diametro_esferas) ):\n d.append ( ufloat (diametro_esferas[i], u_d[i]) )\n r.append ( d[i] / 2 )\n print (r[i])\nprint ()\n\ndiametro_tubo = ufloat (55, 0.0102) #mm\nR = diametro_tubo / 2\nprint (R)\nprint ()\n\nH = ufloat (380, 0.2041) #cm --> mm\nprint (H)\nprint ()\n\nk = [] #adimensional\nfor i in range (len (r) ):\n k.append ( ( 1 + 2.4*r[i]/R ) * ( 1 + 3.3*r[i]/R ) )\n print (k[i])\nprint ()\n \nr_2 = []\nfor i in range ( len(r) ):\n r_2.append ( r[i] * r[i] )\n print (r_2[i])\nprint ()\n\nx = []\nfor i in range ( len(r_2) ):\n x.append ( r_2[i] / k[i] )\n print (x[i])\n \n\n\n\n\n\n\n","sub_path":"Unicamp/F 229/exp. 5/prop_incert_exp4_v01.py","file_name":"prop_incert_exp4_v01.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"428216727","text":"\nimport urllib\nimport urllib.request\nimport webbrowser \nimport re\ndef yunpan_search(key):\n\tkeyword = key\n\tkeyword = keyword.encode('utf-8')\n\tkeyword = urllib.request.quote(keyword)\n\turl = \"http://www.wangpansou.cn/s.php?q=\"+keyword+\"&wp=0&start=0\"\n\t#webbrowser.open(url) \n\treq = urllib.request.Request(url, headers = {\n \t'Connection': 'Keep-Alive',\n \t'Accept': 'text/html, application/xhtml+xml, */*',\n \t'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',\n \t'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'\n})\n\topener = urllib.request.urlopen(req)\n\thtml = opener.read()\n\thtml = html.decode('utf-8')\n\trex = r'https?://pan.baidu.com.*\\?uk=[0-9]{10}.*[\\d+?]\"'\n\tm = re.findall(rex,html)\n\tf = open('/root/Desktop/txt.txt','w')\n\tfor i in m:\n\t\tf.write(i)\n\t\tf.write('\\n\\n')\n\tf.close();\n\tprint(\"抓取成功!\")\n\t\n\t\n\nif __name__=='__main__':\n\tprint('爬取百度云盘资源快捷爬取')\n\tkey = input('输入你想搜索的资源:')\n\tyunpan_search(key)","sub_path":"dev/python/other/python爬取百度云网盘资源.py","file_name":"python爬取百度云网盘资源.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"38126631","text":"# Raymond Kirk (Tunstill) Copyright (c) 2020\n# Email: ray.tunstill@gmail.com\n\n# Provides conversion methods between different storage types\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport rospy\nimport rosbag\nimport pathlib\nimport pymongo\nimport argparse\nfrom tqdm import tqdm\nfrom datetime import datetime\nfrom pymongo.collection import ObjectId\n\nfrom topic_store.file_parsers import resolve_scenario_yaml\n\ntry:\n from urlparse import urlparse\nexcept ImportError: # Py3\n from urllib.parse import urlparse\n\nfrom topic_store.database import MongoStorage\nfrom topic_store.filesystem import TopicStorage\n\n\ndef topic_store_to_mongodb(topic_store_file, scenario_file):\n client = MongoStorage.load(scenario_file)\n print(\"Converting '{}' to MongoDB '{}'\".format(topic_store_file.name, client.uri))\n\n storage = TopicStorage.load(topic_store_file)\n with tqdm() as progress_bar:\n for item in storage:\n try:\n client.insert_one(item)\n except pymongo.errors.DuplicateKeyError:\n print(\"Storage Item '_id: {}' already exists in the '{}/{}' collection\".format(item.id, client.name,\n client.collection_name))\n progress_bar.update()\n\n\ndef get_mongo_storage_by_session(client, *args, **kwargs):\n sessions = client.get_unique_sessions()\n if len(sessions) > 1:\n s_lut = sorted([{\n \"id\": sid, \"on\": datetime.fromtimestamp(data[\"time\"]).strftime('%Y-%m-%d %H:%M:%S'),\n \"float_time\": data[\"time\"], \"count\": data[\"count\"]\n } for sid, data in sessions.items()], key=lambda x: x[\"float_time\"])\n print(\"Collection {}/{} contains data from:\\n{}\".format(client.name, client.collection_name, ''.join(\n [\"\\t{}. Session {} on {} containing {} documents\\n\".format(i, s[\"id\"], s[\"on\"], s[\"count\"]) for i, s in\n enumerate(s_lut)])))\n while True:\n try:\n char = raw_input(\"Please enter a number or enter for all: \")\n if char is \"\":\n return client.find(*args, **kwargs)\n return client.find_by_session_id(s_lut[int(char)][\"id\"], *args, **kwargs)\n except (EOFError, ValueError, IndexError):\n print(\"Please choose an appropriate option\")\n continue\n return client.find(*args, **kwargs)\n\n\ndef mongodb_to_topic_store(mongodb_client, topic_store_file, query=None, projection=None):\n print(\"Converting MongoDB '{}' to '{}'\".format(mongodb_client.uri, topic_store_file.name))\n\n if query is None or not isinstance(query, dict):\n storage = get_mongo_storage_by_session(mongodb_client, skip_on_error=True, projection=projection)\n else:\n storage = mongodb_client.find(query, skip_on_error=True, projection=projection)\n\n count = storage.cursor.count()\n\n topic_storage = TopicStorage(topic_store_file)\n\n with tqdm(total=count) as progress_bar:\n for item in storage:\n topic_storage.insert_one(item)\n progress_bar.update()\n\n\ndef mongodb_to_ros_bag(mongodb_client, output_file, query=None, projection=None):\n print(\"Converting MongoDB '{}' to ROS bag '{}'\".format(mongodb_client.uri, output_file.name))\n\n if query is None or not isinstance(query, dict):\n storage = get_mongo_storage_by_session(mongodb_client, skip_on_error=True, projection=projection)\n else:\n storage = mongodb_client.find(query, skip_on_error=True, projection=projection)\n\n count = storage.cursor.count()\n\n ros_bag = rosbag.Bag(str(output_file), 'w')\n\n try:\n with tqdm(total=count) as progress_bar:\n for item in storage:\n msgs = item.to_ros_msg_list()\n time = rospy.Time.from_sec(item[\"_ts_meta\"][\"ros_time\"])\n for msg in 
msgs:\n if hasattr(msg, \"_connection_header\"):\n source = getattr(msg, \"_connection_header\")[\"topic\"]\n if source:\n try:\n ros_bag.write(source, msg, time)\n except Exception as e:\n print(\"Could not write\", source, 'because', e.message)\n progress_bar.update()\n finally:\n print(\"Closing the ROS bag '{}'\".format(output_file))\n ros_bag.close()\n\n\ndef topic_store_to_ros_bag(topic_store_file, output_file):\n print(\"Converting '{}' to ROS bag '{}'\".format(topic_store_file.name, output_file.name))\n storage = TopicStorage.load(topic_store_file)\n ros_bag = rosbag.Bag(str(output_file), 'w')\n\n try:\n with tqdm() as progress_bar:\n for item in storage:\n msgs = item.to_ros_msg_list()\n time = rospy.Time.from_sec(item[\"_ts_meta\"][\"ros_time\"])\n for msg in msgs:\n if hasattr(msg, \"_connection_header\"):\n source = getattr(msg, \"_connection_header\")[\"topic\"]\n if source:\n try:\n ros_bag.write(source, msg, time)\n except Exception as e:\n print(\"Could not write\", source, 'because', e.message)\n progress_bar.update()\n finally:\n print(\"Closing the ROS bag '{}'\".format(output_file))\n ros_bag.close()\n\n\ndef _convert_cli():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", help=\"Input File\", type=str, required=True)\n parser.add_argument(\"-o\", \"--output\", help=\"Output File\", type=str, required=True)\n parser.add_argument(\"-c\", \"--collection\", help=\"MongoDB collection to use if URI passed as --input\", type=str,\n required=False)\n parser.add_argument(\"-q\", \"--query\", help='MongoDB input query as dict (example: -q '\n '\\'{\"_id\": \"ObjectId(5f718a354e5e8239dcd1eca1)\"}\\'', type=str,\n required=False, default=None)\n parser.add_argument(\"-p\", \"--projection\", help='MongoDB input projection as dict (example: -p \\'{\"name\": 1}\\'',\n type=str, required=False, default=None)\n args = parser.parse_args()\n\n rospy.init_node(\"topic_store_convert\", anonymous=True)\n input_path = pathlib.Path(args.input)\n output_path = pathlib.Path(args.output)\n\n # if not input_path.exists():\n # raise IOError(\"Input file '{}' does not exist\".format(input_path))\n\n if input_path.suffix == \".bag\":\n raise NotImplementedError(\"Converting from ROS bags is not currently supported. 
\"\n \"The conversion to ROS bags is lossy and requires adding meta data to reconstruct\"\n \"the original .topic_store or database documents\")\n elif input_path.suffix == TopicStorage.suffix and output_path.suffix == \".bag\":\n topic_store_to_ros_bag(input_path, output_path)\n elif input_path.suffix == \".yaml\" and output_path.suffix == TopicStorage.suffix:\n input_path = resolve_scenario_yaml(input_path)\n mongodb_to_topic_store(MongoStorage.load(input_path), output_path)\n elif input_path.suffix == \".yaml\" and output_path.suffix == \".bag\":\n input_path = resolve_scenario_yaml(input_path)\n mongodb_to_ros_bag(MongoStorage.load(input_path), output_path)\n elif input_path.suffix == TopicStorage.suffix and output_path.suffix == \".yaml\":\n output_path = resolve_scenario_yaml(output_path)\n topic_store_to_mongodb(input_path, output_path)\n elif input_path.suffix == output_path.suffix:\n print(\"No conversion or migration for '{}' to '{}'\".format(input_path, output_path))\n print(\"If you would like to copy the file please use 'cp {} {}'\".format(input_path, output_path))\n elif isinstance(args.input, str) and \"mongodb://\" in args.input:\n srv = args.input\n collection = args.collection\n query = args.query\n projection = args.projection\n\n if not hasattr(args, \"query\") or not args.query:\n raise ValueError(\"If input is a MongoDB URI you must specify a DB query -q/--query to export data\")\n if not hasattr(args, \"collection\") or not args.collection:\n raise ValueError(\"If input is a MongoDB URI you must specify a DB collection -c/--collection to query data\")\n\n # Try to parse a query/projection string to a dict and perform some basic cleaning\n # The query string will filter the db documents by client.find(query)\n if query is not None:\n try:\n query, projection = [x if x is None else json.loads(x) for x in [args.query, args.projection]]\n except ValueError:\n print(\"Query/Projection parameter cannot be parsed as a python dict \\nQ: '{}'\\nP: '{}'\".format(\n args.query, args.projection))\n raise\n\n # Some simple rules to support searching by ID from console\n for k, v in query.items():\n if isinstance(v, (str, unicode)) and (v.startswith('ObjectId(') and v.endswith(')')):\n print(\"Converting query field '{}' to ObjectId\".format(k))\n query[k] = ObjectId(str(v[9:-1]))\n\n # DB name will usually be specified as authSource in the URI, if not present use default=topic_store\n db_name = None\n if \"authSource\" in srv:\n options = [s.split('=') for s in urlparse(srv).query.split(\"&\") if s]\n options = {k: v for k, v in options}\n if \"authSource\" in options:\n db_name = options[\"authSource\"]\n client = MongoStorage(collection=collection, uri=srv, db_name=db_name)\n\n if output_path.suffix == \".bag\":\n mongodb_to_ros_bag(client, output_path, query=query, projection=projection)\n elif output_path.suffix == TopicStorage.suffix:\n mongodb_to_topic_store(client, output_path, query=query, projection=projection)\n else:\n raise ValueError(\"No valid conversion from Mongo URI '{}' to '{}' file\".format(client.uri, output_path))\n else:\n print(\"No conversion or migration for '{}' to '{}'\".format(input_path, output_path))\n\n\nif __name__ == '__main__':\n _convert_cli()\n","sub_path":"src/topic_store/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":10372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"496171309","text":"from __future__ import absolute_import\n\n'''Resnet for cifar dataset.\nPorted form\nhttps://github.com/facebook/fb.resnet.torch\nand\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n(c) YANG, Wei\n'''\nimport torch.nn as nn\nimport math\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\n\n__all__ = ['resnet','resnet50_whr']\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):\n super(ResNet, self).__init__()\n # Model type specifies number of layers for CIFAR-10 model\n if block_name.lower() == 'basicblock':\n assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'\n n = (depth - 2) // 6\n block = BasicBlock\n elif block_name.lower() == 'bottleneck':\n assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'\n n = (depth - 2) // 9\n block = Bottleneck\n else:\n raise ValueError('block_name shoule be Basicblock or Bottleneck')\n\n\n self.inplanes = 16\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 16, n)\n self.layer2 = self._make_layer(block, 32, n, stride=2)\n self.layer3 = self._make_layer(block, 64, n, stride=2)\n self.avgpool = nn.AvgPool2d(8)\n self.fc = nn.Linear(64 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x) # 32x32\n\n x = self.layer1(x) # 32x32\n x = self.layer2(x) # 16x16\n x = self.layer3(x) # 8x8\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet(**kwargs):\n \"\"\"\n Constructs a ResNet model.\n \"\"\"\n return ResNet(**kwargs)\n\n\ndef conv_1_3x3():\n return nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False), # 'SAME'\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True))\n # TODO: nn.MaxPool2d(kernel_size=3, stride=2, padding=0)) # 'valid'\n\ndef conv_1_3x3_dconv():\n return nn.Sequential(Dconv_shuffle(3, 64, 3, 1, 1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True))\n # TODO: nn.MaxPool2d(kernel_size=3, stride=2, padding=0)) # 'valid'\n\nclass bottleneck(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, strides=(2, 2)):\n super(bottleneck, self).__init__()\n plane1, plane2, plane3 = planes\n self.outchannels = plane3\n self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(plane1)\n self.conv2 = nn.Conv2d(plane1, plane2, kernel_size=kernel_size, stride=strides, padding=int((kernel_size - 1) / 2), bias=False)\n self.bn2 = nn.BatchNorm2d(plane2)\n self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(plane3)\n self.conv4 = nn.Conv2d(inplanes, plane3, kernel_size=1, stride=strides, padding=0, bias=False)\n self.bn4 = nn.BatchNorm2d(plane3)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, input_tensor):\n out = self.conv1(input_tensor)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n shortcut = self.conv4(input_tensor)\n shortcut = self.bn4(shortcut)\n\n out += shortcut\n out = self.relu(out)\n return out\n\n\nclass identity_block3(nn.Module):\n def __init__(self, inplanes, planes, kernel_size):\n super(identity_block3, self).__init__()\n plane1, plane2, plane3 = planes\n self.outchannels = plane3\n self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(plane1)\n self.conv2 = nn.Conv2d(plane1, plane2, kernel_size=kernel_size, stride=1, padding=int((kernel_size - 1) / 2), bias=False)\n self.bn2 = nn.BatchNorm2d(plane2)\n self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(plane3)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, input_tensor, return_conv3_out=False): # return_conv3_out is only served for grad_cam.py\n out = self.conv1(input_tensor)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out_conv3 = self.conv3(out)\n out = 
self.bn3(out_conv3)\n\n out += input_tensor\n out = self.relu(out)\n if return_conv3_out:\n return out, out_conv3\n else:\n return out\nclass Dconv_shuffle(nn.Module):\n \"\"\"\n Deformable convolution with random shuffling of the feature map.\n Random shuffling only happened within each page independently.\n The sampling locations are generated for each forward pass during the training.\n \"\"\"\n def __init__(self, inplane, outplane, kernel_size, stride, padding):\n super(Dconv_shuffle, self).__init__()\n print('cifar Dconv_shuffle is used')\n self.dilated_conv = nn.Conv2d(inplane, outplane, kernel_size=kernel_size, stride=stride, padding=padding,\n bias=False)\n self.indices = None\n\n def _setup(self, inplane, spatial_size):\n self.indices = np.empty((inplane, spatial_size), dtype=np.int64)\n for i in range(inplane):\n self.indices[i, :] = np.arange(self.indices.shape[1])+ i*self.indices.shape[1]\n\n def forward(self, x):\n\n x_shape = x.size() # [128, 3, 32, 32]\n x = x.view(x_shape[0], -1)\n if self.indices is None:\n self._setup(x_shape[1], x_shape[2]*x_shape[3])\n for i in range(x_shape[1]):\n np.random.shuffle(self.indices[i])\n x = x[:, torch.from_numpy(self.indices)].view(x_shape)\n return self.dilated_conv(x)\nclass bottleneck_shuffle(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, strides=(2, 2), type='error'):\n super(bottleneck_shuffle, self).__init__()\n plane1, plane2, plane3 = planes\n self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(plane1)\n\n self.dconv1 = Dconv_shuffle(plane1, plane2, kernel_size=kernel_size, stride=strides, padding=1)\n self.bn2 = nn.BatchNorm2d(plane2)\n\n self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(plane3)\n\n self.dconv2 = Dconv_shuffle(inplanes, plane3, kernel_size=1, stride=strides, padding=0)\n self.bn4 = nn.BatchNorm2d(plane3)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, input_tensor):\n out = self.conv1(input_tensor)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.dconv1(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n shortcut = self.dconv2(input_tensor)\n shortcut = self.bn4(shortcut)\n\n out += shortcut\n out = self.relu(out)\n return out\n\nclass identity_block3_shuffle(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, type='error'):\n super(identity_block3_shuffle, self).__init__()\n plane1, plane2, plane3 = planes\n\n self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(plane1)\n\n self.dconv = Dconv_shuffle(plane1, plane2, kernel_size=kernel_size, stride=1, padding=1)\n self.bn2 = nn.BatchNorm2d(plane2)\n\n self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(plane3)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, input_tensor):\n out = self.conv1(input_tensor)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.dconv(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n x_shape = input_tensor.size() # [128, 3, 32, 32]\n x = input_tensor.view(x_shape[0], x_shape[1] * x_shape[2] * x_shape[3]) # [128, 3*32*32]\n shuffled_input = torch.empty(x_shape[0], x_shape[1], x_shape[2], x_shape[3]).cuda(0)\n perm = torch.empty(0).float()\n for i in range(x_shape[1]):\n a = torch.randperm(x_shape[2] * x_shape[3]) + i * 
x_shape[2] * x_shape[3]\n perm = torch.cat((perm, a.float()), 0)\n shuffled_input[:, :, :, :] = x[:, perm.long()].view(x_shape[0], x_shape[1], x_shape[2], x_shape[3])\n\n out += shuffled_input\n out = self.relu(out)\n return out\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)\n\ndef softmax(a):\n length = a.size(0)\n a = a.cpu().detach().numpy()\n a_exp = np.exp(a)\n # summ = np.sum(a_exp,keepdim=True)\n summ = a_exp.sum(0)\n for i in range(length):\n a_exp[i] = a_exp[i]/summ\n a_exp = torch.from_numpy(a_exp)\n return a_exp\n\ndef find_min_value(a):\n # length = a.size(0)\n a = a.cpu().detach().numpy()\n # mean = np.mean(a)\n # mean = np.array(mean)\n # mean = torch.from_numpy(mean).cuda()\n min_value = np.min(a)\n min_value = np.array(min_value)\n # mm = torch.tensor([min_value])\n min_value = torch.from_numpy(min_value).cuda()\n return min_value\n\ndef sa(x):\n # global cross-channel averaging # 32,2048,24,8\n x = x.mean(1, keepdim=True) # 32,1,24,8\n h = x.size(2)\n w = x.size(3)\n x = x.view(x.size(0),-1) # 32,192\n z = F.softmax(x, 1)\n # z = x\n # for b in range(x.size(0)):\n # # print z[b]\n # # mean = torch.mean(z[b])\n # # min_value = find_min_value(z[b])\n # # print (min_value)\n # # print z[b]\n # # z[b][z[b] 0:\n self.conv_3x3 = conv_1_3x3()\n else:\n self.conv_3x3 = conv_1_3x3_dconv()\n\n if layer > 10:\n self.bottleneck_1 = bottleneck(16*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3, strides=(1, 1))\n else:\n self.bottleneck_1 = bottleneck_shuffle(16*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3, strides=(1, 1), type=type)\n if layer > 11:\n self.identity_block_1_1 = identity_block3(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3)\n else:\n self.identity_block_1_1 = identity_block3_shuffle(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3, type=type)\n if layer > 12:\n self.identity_block_1_2 = identity_block3(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3)\n else:\n self.identity_block_1_2 = identity_block3_shuffle(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3, type=type)\n\n if layer > 20:\n self.bottleneck_2 = bottleneck(64*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, strides=(2, 2))\n else:\n self.bottleneck_2 = bottleneck_shuffle(64*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, strides=(2, 2), type=type)\n if layer > 21:\n self.identity_block_2_1 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)\n else:\n self.identity_block_2_1 = identity_block3_shuffle(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, type=type)\n if layer > 22:\n self.identity_block_2_2 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)\n else:\n self.identity_block_2_2 = identity_block3_shuffle(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, type=type)\n if layer > 23:\n self.identity_block_2_3 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)\n else:\n 
self.identity_block_2_3 = identity_block3_shuffle(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, type=type)\n\n if layer > 30:\n self.bottleneck_3 = bottleneck(128*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, strides=(2, 2))\n else:\n self.bottleneck_3 = bottleneck_shuffle(128*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, strides=(2, 2), type=type)\n if layer > 31:\n self.identity_block_3_1 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)\n else:\n self.identity_block_3_1 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)\n if layer > 32:\n self.identity_block_3_2 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)\n else:\n self.identity_block_3_2 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)\n if layer > 33:\n self.identity_block_3_3 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)\n else:\n self.identity_block_3_3 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)\n if layer > 34:\n self.identity_block_3_4 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)\n else:\n self.identity_block_3_4 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)\n if layer > 35:\n self.identity_block_3_5 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)\n else:\n self.identity_block_3_5 = identity_block3_shuffle(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, type=type)\n\n if layer > 40:\n self.bottleneck_4 = bottleneck(256*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, strides=(2, 2))\n else:\n self.bottleneck_4 = bottleneck_shuffle(256*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, strides=(2, 2), type=type)\n if layer > 41:\n self.identity_block_4_1 = identity_block3(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3)\n else:\n self.identity_block_4_1 = identity_block3_shuffle(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, type=type)\n if layer > 42:\n self.identity_block_4_2 = identity_block3(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3)\n else:\n self.identity_block_4_2 = identity_block3_shuffle(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, type=type)\n\n self.avgpool = nn.AdaptiveAvgPool2d(1) # TODO: check the final size\n self.fc = nn.Linear(512*block_ex, num_classes)\n\n self.local_conv_layer1 = nn.Conv2d(256, self.num_features, kernel_size=1, padding=0, bias=False)\n self.local_conv_layer2 = nn.Conv2d(512, self.num_features, kernel_size=1, padding=0, bias=False)\n self.local_conv_layer3 = nn.Conv2d(1024, self.num_features, kernel_size=1, padding=0, bias=False)\n self.instance_layer1 = nn.Linear(self.num_features, self.num_classes)\n self.instance_layer2 = nn.Linear(self.num_features, self.num_classes)\n self.instance_layer3 = nn.Linear(self.num_features, self.num_classes)\n # self.se1 = SELayer(256)\n # self.se2 = SELayer(512)\n # self.se3 = SELayer(1024)\n # Initialize the weights\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n 
# raise Exception('You are using a model without BN!!!')\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, input_x):\n # print(input_x.size())\n x = self.conv_3x3(input_x)\n # np.save('/nethome/yuefan/fanyue/dconv/fm3x3.npy', x.detach().cpu().numpy())\n # print(x.size())\n x = self.bottleneck_1(x)\n x = self.identity_block_1_1(x)\n x_layer1 = self.identity_block_1_2(x)\n # print(x.size())\n x = self.bottleneck_2(x_layer1)\n x = self.identity_block_2_1(x)\n x = self.identity_block_2_2(x)\n x_layer2 = self.identity_block_2_3(x)\n # print(x.size())\n x = self.bottleneck_3(x_layer2)\n x = self.identity_block_3_1(x)\n x = self.identity_block_3_2(x)\n x = self.identity_block_3_3(x)\n x = self.identity_block_3_4(x)\n x_layer3 = self.identity_block_3_5(x)\n # print(x.size())\n x = self.bottleneck_4(x_layer3)\n x = self.identity_block_4_1(x)\n x = self.identity_block_4_2(x)\n # print(\"feature shape:\", x.size())\n\n # x_layer1 = self.se1(x_layer1)\n atta = sa(x_layer1)\n x_layer1 = x_layer1*atta\n x_layer1 = nn.functional.avg_pool2d(x_layer1, kernel_size=(32, 32), stride=(1, 1))\n x_layer1 = self.local_conv_layer1(x_layer1)\n x_layer1 = x_layer1.contiguous().view(x_layer1.size(0), -1)\n x_layer1 = self.instance_layer1(x_layer1)\n\n # x_layer2 = self.se2(x_layer2)\n atta = sa(x_layer2)\n x_layer2 = x_layer2*atta\n x_layer2 = nn.functional.avg_pool2d(x_layer2, kernel_size=(16, 16), stride=(1, 1))\n x_layer2 = self.local_conv_layer2(x_layer2)\n x_layer2 = x_layer2.contiguous().view(x_layer2.size(0), -1)\n x_layer2 = self.instance_layer2(x_layer2)\n\n # x_layer3 = self.se3(x_layer3)\n atta = sa(x_layer3)\n x_layer3 = x_layer3*atta\n x_layer3 = nn.functional.avg_pool2d(x_layer3, kernel_size=(8, 8), stride=(1, 1))\n x_layer3 = self.local_conv_layer3(x_layer3)\n x_layer3 = x_layer3.contiguous().view(x_layer3.size(0), -1)\n x_layer3 = self.instance_layer3(x_layer3)\n\n if self.include_top:\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n # TODO: why there is no dropout\n x = self.fc(x)\n return x,x_layer1,x_layer2,x_layer3\n\nclass ResnetWHR(nn.Module):\n def __init__(self, dropout_rate, num_classes, include_top):\n \"\"\"\n This is ResNet50 for PCB verison\n \"\"\"\n super(ResnetWHR, self).__init__()\n self.dropout_rate = dropout_rate\n self.num_classes = num_classes\n self.include_top = include_top\n self.num_features = 512\n\n # Define the building blocks\n self.conv_3x3 = conv_1_3x3()\n\n self.bottleneck_1 = bottleneck(16, [16, 16, 64], kernel_size=3, strides=(1, 1))\n self.identity_block_1_1 = identity_block3(64, [16, 16, 64], kernel_size=3)\n self.identity_block_1_2 = identity_block3(64, [16, 16, 64], kernel_size=3)\n\n self.bottleneck_2 = bottleneck(64, [32, 32, 128], kernel_size=3, strides=(2, 2))\n self.identity_block_2_1 = identity_block3(128, [32, 32, 128], kernel_size=3)\n self.identity_block_2_2 = identity_block3(128, [32, 32, 128], kernel_size=3)\n self.identity_block_2_3 = identity_block3(128, [32, 32, 128], kernel_size=3)\n\n self.bottleneck_3 = bottleneck(128, [64, 64, 256], kernel_size=3, strides=(2, 2))\n self.identity_block_3_1 = identity_block3(256, [64, 64, 256], kernel_size=3)\n self.identity_block_3_2 = identity_block3(256, [64, 64, 256], kernel_size=3)\n self.identity_block_3_3 = identity_block3(256, [64, 64, 256], kernel_size=3)\n self.identity_block_3_4 = identity_block3(256, [64, 64, 256], kernel_size=3)\n self.identity_block_3_5 = identity_block3(256, [64, 64, 256], kernel_size=3)\n\n self.bottleneck_4 = bottleneck(256, [128, 128, 512], 
kernel_size=3, strides=(2, 2))\n self.identity_block_4_1 = identity_block3(512, [128, 128, 512], kernel_size=3)\n self.identity_block_4_2 = identity_block3(512, [128, 128, 512], kernel_size=3)\n\n # =======================================top=============================================\n # self.se1 = SELayer(64)\n # self.se2 = SELayer(128)\n # self.se3 = SELayer(256)\n\n # self.local_conv_layer1 = nn.Conv2d(64, self.num_features, kernel_size=1, padding=0, bias=False)\n # self.local_conv_layer2 = nn.Conv2d(128, self.num_features, kernel_size=1, padding=0, bias=False)\n # self.local_conv_layer3 = nn.Conv2d(256, self.num_features, kernel_size=1, padding=0, bias=False)\n # self.instance_layer1 = nn.Linear(self.num_features, self.num_classes)\n # self.instance_layer2 = nn.Linear(self.num_features, self.num_classes)\n # self.instance_layer3 = nn.Linear(self.num_features, self.num_classes)\n\n self.instance0 = nn.Linear(self.num_features, self.num_classes)\n self.instance1 = nn.Linear(self.num_features, self.num_classes)\n self.instance2 = nn.Linear(self.num_features, self.num_classes)\n self.instance3 = nn.Linear(self.num_features, self.num_classes)\n self.instance4 = nn.Linear(self.num_features, self.num_classes)\n # self.linear_list = []\n # for i in range(16):\n # self.linear_list.append(nn.Linear(self.num_features, self.num_classes).cuda())\n\n # self.local_conv = nn.Conv2d(self.num_features, self.num_features, kernel_size=1, padding=0, bias=False)\n # self.local_bn = nn.BatchNorm2d(self.num_features)\n\n # Initialize the weights\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def forward(self, input_x):\n x = self.conv_3x3(input_x)\n\n x = self.bottleneck_1(x)\n x = self.identity_block_1_1(x)\n x_layer1 = self.identity_block_1_2(x)\n\n x = self.bottleneck_2(x_layer1)\n x = self.identity_block_2_1(x)\n x = self.identity_block_2_2(x)\n x_layer2 = self.identity_block_2_3(x)\n\n x = self.bottleneck_3(x_layer2)\n x = self.identity_block_3_1(x)\n x = self.identity_block_3_2(x)\n x = self.identity_block_3_3(x)\n x = self.identity_block_3_4(x)\n x_layer3 = self.identity_block_3_5(x)\n\n x = self.bottleneck_4(x_layer3)\n x = self.identity_block_4_1(x)\n x = self.identity_block_4_2(x)\n\n # x_layer1 = self.se1(x_layer1)\n # x_layer1 = nn.functional.avg_pool2d(x_layer1, kernel_size=(32, 32), stride=(1, 1))\n # x_layer1 = self.local_conv_layer1(x_layer1)\n # x_layer1 = x_layer1.contiguous().view(x_layer1.size(0), -1)\n # x_layer1 = self.instance_layer1(x_layer1)\n #\n # x_layer2 = self.se2(x_layer2)\n # x_layer2 = nn.functional.avg_pool2d(x_layer2, kernel_size=(16, 16), stride=(1, 1))\n # x_layer2 = self.local_conv_layer2(x_layer2)\n # x_layer2 = x_layer2.contiguous().view(x_layer2.size(0), -1)\n # x_layer2 = self.instance_layer2(x_layer2)\n #\n # x_layer3 = self.se3(x_layer3)\n # x_layer3 = nn.functional.avg_pool2d(x_layer3, kernel_size=(8, 8), stride=(1, 1))\n # x_layer3 = self.local_conv_layer3(x_layer3)\n # x_layer3 = x_layer3.contiguous().view(x_layer3.size(0), -1)\n # x_layer3 = self.instance_layer3(x_layer3)\n\n sx = x.size(2) / 4\n x = nn.functional.avg_pool2d(x, kernel_size=(sx, x.size(3)), stride=(sx, x.size(3))) # 4x1\n\n # x = self.local_conv(x)\n # x = self.local_bn(x)\n # x = 
nn.functional.relu(x)\n\n x4 = nn.functional.avg_pool2d(x, kernel_size=(4, 1), stride=(1, 1))\n x4 = x4.contiguous().view(x4.size(0), -1)\n c4 = self.instance4(x4)\n\n # x = x.view(x.size(0), x.size(1), 16)\n # c_list = []\n # for i in range(16):\n # x_offset = torch.empty(x.size(0), 512).cuda(0)\n # # print(x_offset[:, :, :].size(), x[:, :, i].size())\n # x_offset[:, :] = x[:, :, i]\n # tmp = self.linear_list[i](x_offset)\n # c_list.append(tmp)\n\n x = x.chunk(4, dim=2)\n x0 = x[0].contiguous().view(x[0].size(0), -1)\n x1 = x[1].contiguous().view(x[1].size(0), -1)\n x2 = x[2].contiguous().view(x[2].size(0), -1)\n x3 = x[3].contiguous().view(x[3].size(0), -1)\n c0 = self.instance0(x0)\n c1 = self.instance1(x1)\n c2 = self.instance2(x2)\n c3 = self.instance3(x3)\n return c0, c1, c2, c3, c4#c_list, c4##, x_layer1, x_layer2, x_layer3\n\ndef resnet50_whr(**kwargs):\n \"\"\"\n Constructs a ResNet model.\n \"\"\"\n return Resnet50_whr(**kwargs)\n","sub_path":"models/cifar/resnet_whr.py","file_name":"resnet_whr.py","file_ext":"py","file_size_in_byte":29832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
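`Dconv_shuffle` above builds a flat numpy index table once and reshuffles it per channel on every forward pass. A stand-alone sketch of the same idea, per-channel spatial shuffling shared across the batch, written directly with torch ops (shapes and names illustrative):

```python
import torch

def shuffle_per_channel(x):
    n, c, h, w = x.shape
    flat = x.reshape(n, c, h * w)
    # One independent permutation per channel, shared across the batch
    perm = torch.stack([torch.randperm(h * w) for _ in range(c)])         # (C, H*W)
    shuffled = torch.gather(flat, 2, perm.unsqueeze(0).expand(n, -1, -1))
    return shuffled.reshape(n, c, h, w)

x = torch.arange(2 * 3 * 4 * 4, dtype=torch.float32).reshape(2, 3, 4, 4)
print(shuffle_per_channel(x).shape)  # torch.Size([2, 3, 4, 4])
```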
+{"seq_id":"362695189","text":"# -*- mode: python -*-\r\n\r\nblock_cipher = None\r\n\r\nimport sys\r\nsys.modules['FixTk'] = None\r\n\r\na = Analysis(['LevelEditor.py'],\r\n pathex=['.'],\r\n binaries=[],\r\n datas=[('EditorCode', 'EditorCode')],\r\n hiddenimports=['pygame', '../Code/GlobalConstants', '../Code/Engine', '../Code/SaveLoad', '../Code/imagesDict', 'xml.etree.ElementTree'],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=['FixTk', 'tcl', 'tk', '_tkinter', 'tkinter', 'Tkinter'],\r\n win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\n\r\nKey = ['mkl','libopenblas']\r\n\r\ndef remove_from_list(input, keys):\r\n outlist = []\r\n for item in input:\r\n name, _, _ = item\r\n flag = 0\r\n for key_word in keys:\r\n if name.find(key_word) > -1:\r\n flag = 1\r\n if flag != 1:\r\n outlist.append(item)\r\n return outlist\r\n\r\na.binaries = remove_from_list(a.binaries, Key)\r\n\r\nexe = EXE(pyz,\r\n a.scripts,\r\n exclude_binaries=True,\r\n name='LevelEditor',\r\n debug=False,\r\n strip=False,\r\n upx=True,\r\n console=True,\r\n icon='editor_icon.ico' )\r\ncoll = COLLECT(exe,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n strip=False,\r\n upx=True,\r\n name='Editor')\r\n","sub_path":"Editor/LevelEditor.spec","file_name":"LevelEditor.spec","file_ext":"spec","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"67590947","text":"# 写一个函数,实现,传入一个表名,把这个表里面的所有数据导出到excel里面\n# def data_to_excel(table_name):\n# pass\n# book.save(table_name.xls)\nimport pymysql\nimport xlwt\n\n\ndef connet_mysql(table_name):\n conn = pymysql.connect(host='', user='', password='', port=3306, db='', charset='utf8')\n cur = conn.cursor()\n sql = \"select * from %s;\" % table_name\n cur.execute(sql)\n # 获取所有返回的数据\n info_list = list(cur.fetchall())\n # 获取表名\n table_head = [field[0] for field in cur.description]\n cur.close()\n conn.close()\n info_list.insert(0, table_head)\n return info_list\n\n\ndef data_to_excel(table_name):\n info_list = connet_mysql(table_name)\n book = xlwt.Workbook()\n sheet = book.add_sheet('userinfo')\n for index,value in enumerate(info_list):\n for index2, value2 in enumerate(value):\n sheet.write(index, index2, value2)\n book.save('stu.xls')\n\n\n","sub_path":"homework/db_to_excel.py","file_name":"db_to_excel.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"598863066","text":"import rclpy\nfrom rclpy.node import Node\nfrom spark_msgs.msg import WaypointData, LaneCoeffs\nimport numpy as np\nimport cv2\n\n\nclass RightWaypointsNode(Node):\n def __init__(self):\n super().__init__(\"RightWaypoints\")\n\n self.subscriber = self.create_subscription(\n LaneCoeffs,\n \"/lane_detection/right_coeffs\",\n self.callback,\n 1\n )\n\n self.publisher = self.create_publisher(\n WaypointData,\n \"/lane_detection/right_waypoints\",\n 1\n )\n\n self.publisher2 = self.create_publisher(\n LaneCoeffs,\n \"/lane_detection/right_waypoint_coeffs\",\n 1\n )\n\n\n def callback(self, msg,n_samples = 100, x_max = 1920, y_max=1080):\n slope = msg.slope\n bias = msg.bias\n\n t_slope, t_bias = self.transform(slope, bias)\n\n if t_slope is not np.inf:\n # Ortaya göre yapalım önce.\n start_x = x_max/2\n start_y = y_max\n\n x_space = np.linspace(start_x - n_samples/2, start_x + n_samples/2, n_samples +1, dtype=\"float64\")\n line_coeffs = np.array([t_slope, start_y - t_slope*start_x])\n line = np.poly1d(line_coeffs)\n y_space = line(x_space)\n\n # Coefficients of the line found and whether it is vertical or rotated\n data2 = LaneCoeffs()\n data2.name = \"Rotated Right\"\n data2.slope = line_coeffs[0]\n data2.bias = line_coeffs[1]\n\n # Waypoint data\n data = WaypointData()\n data.name = \"right\"\n data.x = list(x_space)\n data.y = list(y_space)\n\n self.publisher2.publish(data2)\n self.publisher.publish(data)\n\n else:\n data = WaypointData()\n data2 = LaneCoeffs()\n\n data.name = \"right\"\n data.x = [x_max/2]*n_samples\n data.y = list(np.linspace(y_max-100, y_max-100-n_samples-1, n_samples, dtype=\"float64\"))\n\n data2.name = \"Vertical\"\n data2.slope = 999999999999999.\n data2.bias = x_max/2\n\n self.publisher2.publish(data2)\n self.publisher.publish(data)\n\n\n def transform(self, slope, bias, vertical_slope_thresh=20):\n # Rotate the line 90 degrees\n rot_slope = (1/slope)\n rot_bias = bias\n\n # If slope's absolute value is greater than threshold, the line is vertical\n if abs(rot_slope) >= vertical_slope_thresh:\n rot_slope = np.inf\n\n return (rot_slope, rot_bias)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n subscriber = RightWaypointsNode()\n\n rclpy.spin(subscriber)\n\n subscriber.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"dev_ws/src/lane_detector/lane_detector/RightWaypointsNode.py","file_name":"RightWaypointsNode.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"462248785","text":"\"\"\"ユーザフィードバックサービスモジュール\n\nユーザのフィードバックに関するサービス関数を記述するモジュール\n\"\"\"\nimport json\n\nfrom core.logger import JSON_LOGGER\nfrom domain.enums.similarity_enums import SimilarityModelType\n\n\ndef exec_like_similar_movie_service(\n movie_id: int,\n model_type: SimilarityModelType,\n like: bool\n) -> None:\n\n # ログ経由で、フィードバックを分析できるようにする\n JSON_LOGGER.info(\n json.dumps({\n \"movie_id\": movie_id,\n \"model_type\": model_type.value,\n \"like\": int(like)\n }),\n extra={\n \"type\": \"UserFeedbackLikeSimilarMovie\"\n }\n )\n","sub_path":"src/app/service/feedback_service.py","file_name":"feedback_service.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"268555276","text":"# -*- coding: UTF-8 -*-\nfrom neo.Cryptography import *\nfrom neo.IO import *\nfrom neo.Wallets import *\nfrom .Mixins import VerifiableMixin\nfrom neo.Cryptography.Crypto import *\nfrom neo.Core.Blockchain import Blockchain\nfrom neo.Core.Helper import Helper\nimport json\nimport ctypes\nimport hashlib\n\nclass BlockBase(VerifiableMixin):\n\n # \n # 区块版本\n # \n Version=None\n # \n # 前一个区块的散列值\n # \n PrevHash=None\n # \n # 该区块中所有交易的Merkle树的根\n # \n MerkleRoot = None\n # \n # 时间戳\n # \n Timestamp = None\n # \n # 区块高度\n # \n Index =0\n\n ConsensusData=None\n # \n # 下一个区块的记账合约的散列值\n # \n NextConsensus = None\n # \n # 用于验证该区块的脚本\n # \n Script = None\n\n __hash = None\n\n\n\n def Hash(self):\n if not self.__hash:\n self.__hash = Crypto.Hash256(self.GetHashData())\n\n return self.__hash\n\n\n def Size(self):\n# sizeof(uint) + PrevHash.Size + MerkleRoot.Size + sizeof(uint) + sizeof(uint) + sizeof(\n# ulong) + NextConsensus.Size + 1 + Script.Size;\n\n uintsize = ctypes.sizeof(ctypes.c_uint)\n ulongsize = ctypes.sizeof(ctypes.c_ulong)\n return uintsize + self.PrevHash.Size() + self.MerkleRoot.Size() + uintsize + uintsize + ulongsize + self.NextConsensus.Size() + 1 + self.Script.Size()\n\n\n def Deserialize(self, reader):\n self.DeserializeUnsigned(reader)\n if reader.ReadByte() != 1:\n raise Exception('Incorrect format')\n self.Script = reader.readSerializableArray(self.scripts)\n\n\n def DeserializeUnsigned(self, reader):\n self.Version = reader.readUInt32()\n self.PrevHash = reader.readSerializableArray()\n self.MerkleRoot = reader.readSerializableArray()\n self.Timestamp = reader.readUInt32()\n self.Index = reader.readUInt32()\n self.ConsensusData = reader.readUInt64()\n self.NextConsensus = reader.readSerializableArray()\n\n def SerializeUnsigned(self, writer):\n writer.writeUInt32(self.Version)\n writer.writeSerializableArray(self.PrevHash)\n writer.writeSerializableArray(self.MerkleRoot)\n writer.writeUInt32(self.Timestamp)\n writer.writeUInt32(self.Index)\n writer.writeUInt64(self.ConsensusData)\n writer.writeSerializableArray(self.NextConsensus)\n\n def GetHashData(self):\n raise NotImplementedError('Not Implemented')\n\n def GetMessage(self):\n return self.GetHashData()\n\n\n def GetScriptHashesForVerifying(self):\n if self.PrevHash == None:\n return [ self.Script.VerificationScript.ToScriptHash()]\n\n prev_header = Blockchain.Default().GetHeader(self.PrevHash)\n if prev_header == None:\n raise Exception('Invalid operation')\n return [ prev_header.NextConsensus ]\n\n\n\n def Serialize(self, writer):\n self.SerializeUnsigned(writer)\n writer.writeByte(1)\n writer.writeSerializableArray(self.Script)\n\n\n\n def ToArray(self):\n raise NotImplementedError()\n\n def ToJson(self):\n json = {}\n json[\"hash\"] = self.__hash.toString()\n\n json[\"size\"] = self.Size\n json[\"version\"] = self.Version\n json[\"previousblockhash\"] = self.PrevHash.ToString()\n json[\"merkleroot\"] = self.MerkleRoot.ToString()\n json[\"time\"] = self.Timestamp\n json[\"index\"] = self.Index\n json[\"nonce\"] = self.ConsensusData.ToString(\"x16\")\n json[\"nextconsensus\"] = self.Wallet.ToAddress(self.NextConsensus)\n json[\"script\"] = self.Script.ToJson()\n return json\n\n def Verify(self):\n if self.Hash == Blockchain.GenesisBlock.Hash: return True\n\n if Blockchain.Default().ContainsBlock(self.Hash): return True\n\n prev_header = Blockchain.Default().GetHeader(self.PrevHash)\n\n if prev_header == None: return False\n\n if prev_header.Index + 1 != self.Index: return False\n\n if 
prev_header.Timestamp >= self.Timestamp: return False\n\n if not Helper.VerifyScripts(self): return False\n\n return True\n\n","sub_path":"neo/Core/BlockBase.py","file_name":"BlockBase.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
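`Crypto.Hash256` used by `Hash()` above is, in NEO, a double SHA-256 over the serialized header; a minimal stand-alone equivalent with `hashlib` (a sketch, not the library's implementation):

```python
import hashlib

def hash256(data: bytes) -> bytes:
    # Double SHA-256, as used for NEO block/header hashes
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

print(hash256(b"serialized header bytes").hex())
```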
+{"seq_id":"2047715","text":"import socket , cv2, pickle, struct\r\n\r\n#creating socket\r\nclient_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM) #creates a socket object\r\nhost_ip = '192.168.1.7' #taking server ip\r\nport = 1234 #taking serverside port\r\n\r\n#connecting to the server\r\nclient_socket.connect((host_ip,port)) \r\n\r\n#Sending an empty message; the QOTD service works by sending arbitrary data to the socket\r\ndata = b\"\" \r\n\r\n#Return the size of the struct (and hence of the bytes object produced by pack corresponding to the format string format.\r\npayload_size = struct.calcsize(\"Q\") \r\n\r\nwhile True:\r\n while len(data) < payload_size :\r\n packet = client_socket.recv(4*1024) #will read at utmost 4*1024 bytes, blocking if no information is waiting to be read.\r\n if not packet : break\r\n data+=packet\r\n packed_msg_size = data[:payload_size] #setting limit to payload size\r\n data = data[payload_size:]\r\n\r\n #Unpack from the buffer according to the format string format. The result is a tuple \r\n msg_size = struct.unpack(\"Q\", packed_msg_size)[0] \r\n \r\n while len(data) < msg_size :\r\n data += client_socket.recv(4*1024)\r\n frame_data = data[:msg_size]\r\n data = data[msg_size:]\r\n frame = pickle.loads(frame_data) # converting to bytes\r\n cv2.imshow(\"Recieved\",frame)\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"):\r\n break\r\n \r\nclient_socket.close()\r\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"305844337","text":"\"\"\"\n\"\"\"\ndef load_file(filename):\n \"\"\"Loads a file to a list\n\n Parameters:\n filename (string): the name of the file containing input on a\n single line\n\n Returns: \n a (list) of ints\n \"\"\"\n inputList = []\n print(\"Loading list from file...\") \n with open(filename) as file:\n inputList = [int(x) for x in file.read().split(\",\")]\n\n return inputList\n\ndef parseInstruction(instruction):\n \"\"\"\n Returns:\n opCode, p1, p2\n \"\"\"\n #print(f\"instruction : {instruction}\")\n\n if len(str(instruction)) == 1:\n return instruction, 0, 0\n\n elif instruction == 99:\n return instruction, 0, 0\n \n elif len(str(instruction)) == 3:\n return int(str(instruction)[-2:]), int(str(instruction)[-3:-2]), 0\n\n else:\n return int(str(instruction)[-2:]), int(str(instruction)[-3:-2]), int(str(instruction)[-4:-3])\n\ndef getValue(mode, parameter, memory):\n \"\"\"\n \"\"\"\n if mode == 0:\n return memory[memory[parameter]]\n else:\n return memory[parameter]\n\ndef Computer(memoryIn, someInput):\n \"\"\"Mostly copied from Part 1\n \"\"\"\n memory = memoryIn.copy()\n\n currentIndex = 0\n while currentIndex != 'end' and currentIndex < len(memory):\n\n if memory[currentIndex] == 3 or memory[currentIndex] == 4:\n #print(f\"instruction : {memory[currentIndex]}\")\n opCode, p1, p2 = memory[currentIndex], 0, 0\n else:\n opCode, p1, p2 = parseInstruction(memory[currentIndex])\n\n if opCode == 1:\n #print(f\"storing {getValue(p1, currentIndex + 1, memory)} + {getValue(p2, currentIndex + 2, memory)} at {memory[currentIndex + 3]}\")\n memory[memory[currentIndex + 3]] = getValue(p1, currentIndex + 1, memory) + getValue(p2, currentIndex + 2, memory)\n currentIndex += 4\n #print(\"1 Add\")\n elif opCode == 2:\n memory[memory[currentIndex + 3]] = getValue(p1, currentIndex + 1, memory) * getValue(p2, currentIndex + 2, memory)\n currentIndex += 4\n #print(\"2 Multiply\")\n elif opCode == 3:\n #print(f\"storing {someInput} at {memory[currentIndex + 1]}\")\n memory[memory[currentIndex + 1]] = someInput\n currentIndex += 2\n #print(\"3 Input\") \n elif opCode == 4:\n if p1 == 0:\n print(f\"output: {memory[memory[currentIndex + 1]]}\")\n else:\n print(f\"output: {memory[currentIndex + 1]}\")\n currentIndex += 2\n #print(\"4 Output\")\n elif opCode == 5:\n if getValue(p1, currentIndex + 1, memory) != 0:\n currentIndex = getValue(p2, currentIndex + 2, memory)\n else:\n currentIndex += 3\n elif opCode == 6:\n if getValue(p1, currentIndex + 1, memory) == 0:\n currentIndex = getValue(p2, currentIndex + 2, memory)\n else:\n currentIndex += 3\n elif opCode == 7:\n if getValue(p1, currentIndex + 1, memory) < getValue(p2, currentIndex + 2, memory):\n memory[memory[currentIndex + 3]] = 1\n currentIndex += 4\n else:\n memory[memory[currentIndex + 3]] = 0\n currentIndex += 4\n\n elif opCode == 8:\n if getValue(p1, currentIndex + 1, memory) == getValue(p2, currentIndex + 2, memory):\n memory[memory[currentIndex + 3]] = 1\n currentIndex += 4\n else:\n memory[memory[currentIndex + 3]] = 0\n currentIndex += 4\n\n else:\n currentIndex = 'end'\n print(\"99 End\")\n\n#test = [3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9]\n#test = [3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1]\n#Computer(test, 2)\n\nComputer(load_file('Day 5/input.txt'), 5)\n\n","sub_path":"Day 5/Sunny with a Chance of Asteroids Part 2.py","file_name":"Sunny with a Chance of Asteroids Part 
2.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"231161912","text":"import random\nprint(\"Welcome to Number guessing game.\")\nname = input(\"Your name: \")\nprint(\"Hello \",name,\", I have chosen a number between 1 and 10 in mind.\\nYou have five guesses.\\nGoodluck.\")\nrandom_number = random.randint(1,10)\ni = 0\nwhile(i < 5):\n answer = int(input(\":\"))\n if answer == random_number:\n print(\"Great, you have guessed the number in \",i + 1,\"guesses\\nanswer= \",answer)\n break\n else:\n print(5 - i - 1,\"chances left\")\n i += 1\nelse:\n print(\"sorry Sherlock, your guesses havent been accurate this time.\\nThe answer is \",random_number)\n\nprint(\"bye \",name,\" until next time\")\n","sub_path":"Fun/Guessing number/guessing_numbers.py","file_name":"guessing_numbers.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"298865721","text":"import sys\n\n\ndef cross_out_multiples_print(end_of_range):\n array = range(2, int(end_of_range))\n for i in array:\n n = 2\n result = 1\n while result < end_of_range: \n result = n*i\n if result in array:\n array.remove(result)\n n+=1\n else:\n n+=1\n print_array = \",\".join(str(i) for i in array)\n return print_array\n","sub_path":"moderate/primes-numbers/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"19354241","text":"__author__ = 'felix.shaw@tgac.ac.uk - 04/08/15'\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'standard': {\n 'format' : \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\",\n 'datefmt' : \"%d/%b/%Y %H:%M:%S\"\n },\n },\n 'handlers': {\n 'null': {\n 'level':'DEBUG',\n 'class':'django.utils.log.NullHandler',\n },\n 'console':{\n 'level':'INFO',\n 'class':'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream' : 'ext://sys.stdout'\n },\n },\n 'loggers': {\n 'django': {\n 'handlers':['console'],\n 'propagate': True,\n 'level':'WARN',\n },\n 'django.db.backends': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n }\n}\n","sub_path":"web/project_copo/settings/settings_logging.py","file_name":"settings_logging.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"297292373","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Odoo\n# Copyright (C) 2013-2019 CodUP ().\n#\n##############################################################################\nimport time\nfrom odoo import api, fields, models\nfrom odoo import netsvc\n\nclass RegulatoryTechnicalFileCreationDone(models.TransientModel):\n _name = 'regulatory.technical.file.creation.done'\n _description = 'Done to the Creation Request'\n\n technical_file_id = fields.Many2one('regulatory.technical.file', string='Technical File Number', required=True, track_visibility='onchange')\n technical_file_name = fields.Char(related='technical_file_id.technical_file_name', string='Technical File Name', track_visibility='onchange')\n contact_id = fields.Many2one('res.partner', string='Contact', required=True)\n\n def done_creation_request(self):\n active_id = self._context.get('active_id')\n if active_id:\n request = self.env['regulatory.technical.file.creation'].browse(self._context.get('active_id'))\n request.write({'technical_file_id': self.technical_file_id.id})\n request.write({'contact_id': self.contact_id.id})\n request.action_confirm()\n request.action_done()\n return {'type': 'ir.actions.act_window_close',}\n","sub_path":"regulatory_issue/wizard/done_tfc.py","file_name":"done_tfc.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"233376513","text":"\"\"\"\nFor that example ODD_PLATFORM_HOST and ODD_PLATFORM_TOKEN environment variables must be set.\n\nexport ODD_PLATFORM_HOST=\"http://localhost:8080\"\nexport ODD_PLATFORM_TOKEN=*ODD_PLATFORM_TOKEN can be obtained from the OpenDataDiscovery platform UI.*\n\nThis example shows how to use the DataSource class to manual discovery for AWS.\nThe DataSource takes an ODDRN uniquely defines source for all data assets that are added to it.\n\nThe DataSource class is a context manager for the discovery process. All assets that are added to the DataSource will be\ningested into the ODD platform (using variables above) when the context is closed.\n\nIn that example we create a DataSource for AWS and add a Lambda function to it with one input and list of outputs.\n\"\"\"\n\nfrom odd_models.discovery import DataSource\nfrom odd_models.discovery.data_assets import AWSLambda, S3Artifact\nfrom odd_models.discovery.data_assets.data_asset_list import DataAssetsList\n\nwith DataSource(\"//cloud/aws/dev\") as data_source:\n validation_lambda = AWSLambda.from_params(\n region=\"eu-central-1\", account=\"0123456789\", function_name=\"validation\"\n )\n input_artifact = S3Artifact.from_url(\"s3://bucket/folder/test_data.csv\")\n\n results = S3Artifact.from_url(\"s3://bucket/folder/test_result.csv\")\n metrics = S3Artifact.from_url(\"s3://bucket/folder/test_metrics.json\")\n\n input_artifact >> validation_lambda >> DataAssetsList([results, metrics])\n\n data_source.add_data_asset(validation_lambda)\n","sub_path":"examples/lambda_discovery.py","file_name":"lambda_discovery.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"186256924","text":"import FWCore.ParameterSet.Config as cms\nfrom Configuration.StandardSequences.Eras import eras\nprocess = cms.Process('BParkNANO',eras.Run2_2018)\n\n\nisMC = False\nwantFullRECO = False\n\n# import of standard configurations\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')\nprocess.load('PhysicsTools.BParkingNano.nanoBPark_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(1000)\n)\n\n\nglobaltag = '102X_dataRun2_Sep2018Rereco_v1'\ninputFiles = cms.untracked.vstring('/store/data/Run2018D/ParkingBPH5/MINIAOD/20Mar2019-v1/120000/0071842F-3A26-0D43-92F2-E2376273008E.root')\noutputFileNANO = cms.untracked.string('testBParkNANO_data_10215.root')\noutputFileFEVT = cms.untracked.string('testBParkFullEvt_data_10215.root')\n\nif isMC:\n globaltag = '102X_upgrade2018_realistic_v15'\n inputFiles = cms.untracked.vstring('/store/user/bainbrid/lowpteleid/BuToKJpsi_Toee_MuFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/crab_lowpteleid/190328_152903/0000/step3_inMINIAODSIM_99.root')\n outputFileNANO = cms.untracked.string('testBParkNANO_mc_10215.root')\n outputFileFEVT = cms.untracked.string('testBParkFullEvt_mc_10215.root')\n\n\n# Input source\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = inputFiles,\n secondaryFileNames = cms.untracked.vstring()\n)\n\nprocess.options = cms.untracked.PSet(\n\n)\n\n# Production Info\nprocess.configurationMetadata = cms.untracked.PSet(\n annotation = cms.untracked.string('test_data_10213 nevts:100'),\n name = cms.untracked.string('Applications'),\n version = cms.untracked.string('$Revision: 1.19 $')\n)\n\n# Output definition\nprocess.FEVTDEBUGHLToutput = cms.OutputModule(\"PoolOutputModule\",\n dataset = cms.untracked.PSet(\n dataTier = cms.untracked.string('GEN-SIM-RECO'),\n filterName = cms.untracked.string('')\n ),\n fileName = outputFileFEVT,\n outputCommands = (cms.untracked.vstring('keep *')),\n splitLevel = cms.untracked.int32(0)\n)\n\nprocess.NANOAODoutput = cms.OutputModule(\"NanoAODOutputModule\",\n compressionAlgorithm = cms.untracked.string('LZMA'),\n compressionLevel = cms.untracked.int32(9),\n dataset = cms.untracked.PSet(\n dataTier = cms.untracked.string('NANOAOD'),\n filterName = cms.untracked.string('')\n ),\n fileName = outputFileNANO,\n outputCommands = process.NANOAODEventContent.outputCommands\n)\n\n\n# Additional output definition\n\n# Other statements\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, globaltag, '')\n\n\n# Path and EndPath definitions\nprocess.nanoAOD_step = cms.Path(process.nanoSequence)\nprocess.endjob_step = cms.EndPath(process.endOfProcess)\nprocess.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)\nprocess.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)\n\n\n# Schedule definition\nprocess.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step, process.NANOAODoutput_step)\nif wantFullRECO:\n process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step, 
process.FEVTDEBUGHLToutput_step, process.NANOAODoutput_step)\nfrom PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask\nassociatePatAlgosToolsTask(process)\n\nfrom PhysicsTools.BParkingNano.nanoBPark_cff import *\nprocess = nanoAOD_customizeMuonTriggerBPark(process)\nprocess = nanoAOD_customizeElectronFilteredBPark(process)\n\n# customisation of the process.\nif isMC:\n from PhysicsTools.BParkingNano.nanoBPark_cff import nanoAOD_customizeMC\n process = nanoAOD_customizeMC(process)\n\n\nprocess.load(\"TrackingTools/TransientTrack/TransientTrackBuilder_cfi\")\nfrom Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete\nprocess = customiseEarlyDelete(process)\n","sub_path":"BParkingNano/test/test_BParkSequence_10215.py","file_name":"test_BParkSequence_10215.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"466447090","text":"import argparse\n\nimport gym\nimport torch\nimport sys\nsys.path.append(\"..\")\nfrom models.Dense_agent import Dense_Policy\n\nparser = argparse.ArgumentParser(description='PyTorch REINFORCE example')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor (default: 0.99)')\nparser.add_argument('--seed', type=int, default=543, metavar='N', help='random seed (default: 543)')\nparser.add_argument('--render', action='store_true', help='render the environment')\nargs = parser.parse_args()\n\nenv = gym.make('CartPole-v0')\nenv.seed(args.seed)\ntorch.manual_seed(args.seed)\n\npolicy = Dense_Policy(input_features=4, action_nums=2)\npolicy.load_state_dict(torch.load('result.pt.tar'))\n\nstate, ep_reward, done,T = env.reset(), 0, False,0\nwhile not done:\n action = policy.select_action(state)\n state, reward, done, _ = env.step(action)\n env.render()\n ep_reward += reward\n T+=1\n\nprint(\"Solved! Episode reward is now {} and the last episode runs to {} time steps!\".format(ep_reward, T))\n","sub_path":"07-REINFORCE/CartPole-v1/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"231329941","text":"\nfrom django.urls import path, include\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\napp_name=\"blog\"\nurlpatterns = [\n path('', views.index, name='index'),\n path('new/', views.post_new, name='post_new'),\n path('/', views.post_detail, name='post_detail'),\n path('/edit', views.post_edit, name='post_edit'),\n path('/delete', views.PostDeleteView.as_view(), name='post_delete'),\n path('/comments', views.comment_list, name=\"comment_list\"),\n path('/comments/new/', views.comment_new, name=\"comment_new\"),\n path('/comments//edit/', views.comment_edit, name=\"comment_edit\"),\n path('/comments//delete/', views.comment_delete, name=\"comment_delete\"),\n path('post.json/', views.post_list_json),\n path('api/v1/', include('blog.api')),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"112711004","text":"\nfrom urllib import request, parse\n#负责处理json格式的模块\nimport json\nimport random\n\n'''\nhttp://www.qiongqi.tech/vote.html\n自动投票\n发现其返回的数据结构为\nvoteList:22,23 目标为23\nv:7377 随机4位数字,防止缓存用\n请求页面为http://www.qiongqi.tech/vote\n\n大致流程是:\n1. 利用data构造内容,然后urlopen打开\n2. 返回一个json格式的结果\n3. 结果就应该是girl的释义\n'''\n\n# 尝试设置代理\nproxy = {'http': '117.127.0.196:80'}\nproxy_support = request.ProxyHandler(proxy)\nopener = request.build_opener(proxy_support)\n\n\nbaseurl = 'http://www.qiongqi.tech/vote'\n\n# 存放用来模拟form的数据一定是dict格式\nkw = ['21', '23']\nkw = ','.join(kw)\ndata = {\n 'voteList': kw,\n 'v': random.randint(1001, 9999)\n}\n\nprint(data)\n# 需要使用parse模块队data进行编码\ndata = parse.urlencode(data).encode()\n\n# 构造一个请求头,请求头部应该至少包含传入的数据长度\n# request要求传入的请求头是一个dict格式\n\nheaders = {\n # 因为使用的是post请求,至少应该包含content - length 字段\n 'Content-Length': len(data)\n}\n'''\n'Host': \"www.qiongqi.tech\",\n'Origin': \"http://www.qiongqi.tech\",\n'Referer': \"http://www.qiongqi.tech/vote.html\"\n'''\n# 构造一个Request的实例\nreq = request.Request(baseurl, data=data, headers=headers)\n\n# 以为已经构造了一个Request的请求实例,则所有的请求信息都可以封装在Request实例中\n# 有了header,data,url,就可以发出请求了\n# rsp = request.urlopen(req)\n\n# 使用代理打开\nrsp = opener.open(baseurl, data=data)\njson_data = rsp.read().decode()\n\nprint(json_data)\n\n# 把json字符串转化成字典\njson_data = json.loads(json_data)\nprint(json_data)\n\n'''\nfor item in json_data['data']:\n print(item['k'], \"--\", item['v'] )\n'''","sub_path":"exer_code/auto_vote/auto_vote_proxy.py","file_name":"auto_vote_proxy.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"251633397","text":"# %%\nimport os\nfrom shutil import copyfile\nfrom pathlib import Path\nimport time\nimport re\nimport pandas as pd\nimport numpy as np\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n# os.chdir('/home/andreykozinov/YandexGPU/HerbsKLS')\n\ndef change_file_extension(filename):\n name, ext = os.path.splitext(filename)\n if ext == \".JPG\":\n os.rename(filename, name + \".jpg\")\n\n\ndef rename_files(filename, directory):\n if Path().joinpath('raw_data', '2021') == Path(directory):\n return ''.join(re.findall(r'\\d', filename)) + '_2021'\n if Path().joinpath('raw_data', '2020', 'samples') == Path(directory):\n return ''.join(re.findall(r'\\d', filename)) + '_2020_sample'\n if Path().joinpath('raw_data', '2020', 'food') == Path(directory):\n return ''.join(re.findall(r'\\d', filename)) + '_2020_food'\n if Path().joinpath('raw_data', '2020') == Path(directory):\n return ''.join(re.findall(r'\\d', filename)) + '_2020'\n if Path().joinpath('raw_data', '2019') == Path(directory):\n return ''.join(re.findall(r'\\d', filename)) + '_2019'\n\n\nos.makedirs(Path().joinpath('data'))\ntime.sleep(3)\ntemp_folder = 'data/'\n\nfor subdir, dirs, files in os.walk('raw_data'):\n for file in files:\n src_file = Path().joinpath(subdir, file)\n dest_file = Path().joinpath(temp_folder, file)\n copyfile(src_file, dest_file)\n\n new_file_name = rename_files(filename=temp_folder + file, directory=subdir)\n name, ext = os.path.splitext(temp_folder + file)\n os.rename(name + ext, temp_folder + new_file_name + ext)\n change_file_extension(temp_folder + new_file_name + ext)\n\nquality_control = pd.read_excel('certificates/certificates_2018_2019.xlsx')\nquality_control.data = pd.to_datetime(quality_control.data, dayfirst=True).dt.year.apply(str)\nquality_control.number = quality_control.number.str[-3:].str.lstrip('0')\n\nfile_name_table = []\nfor idx, row in quality_control.iterrows():\n if row[3] == 'drug':\n drug = row[1] + '_' + row[0]\n file_name_table.append(drug)\n else:\n other = row[1] + '_' + row[0] + '_' + row[3]\n file_name_table.append(other)\n\nquality_control['file_name'] = file_name_table\nquality_control = quality_control[['file_name', 'item']]\n\ntemp_folder = 'data/'\nfiles_list = [name.split('.')[0] for name in os.listdir(Path().joinpath(temp_folder))]\nprint(\"Количество фотографий = \", len(files_list))\n\nfile_df = pd.DataFrame(files_list).rename(columns={0: 'file_name'})\nquality_control = quality_control.merge(file_df, how='right', on='file_name')\nquality_control.dropna(inplace=True)\nprint(\"Количество фото после объединения\", quality_control.shape[0])\n\n\ndef train_test_split(df, proportion):\n # Создаем колонку number с количеством изображений по каждому наименованию для train\n split = (proportion * df.groupby('item').count()).reset_index()\n split['file_name'] = split['file_name'].apply(np.ceil)\n split = split.rename(columns={'file_name': 'train_num'})\n df = pd.merge(df, split, how='left', on='item')\n df['split'] = 0\n\n # Если количество изображений по наименованию составляет 1, то дублируем эту строку\n single_group = df.loc[df['train_num'] == 1]\n df = df.append(single_group, ignore_index=True)\n df = df.sort_values(by='item').reset_index(drop='True')\n\n # Создаем колонку split со значениями train и validation\n # Train = количество изображений по каждой категории * процент разделения\n # Validation = количество изображений по каждой категории - Train\n counter = 0\n for i in range(0, df.shape[0] - 1):\n if df['item'][i] == df['item'][i + 1]:\n 
if counter < df['train_num'][i]:\n df['split'][i] = 'train'\n counter += 1\n else:\n df['split'][i] = 'test'\n counter += 1\n else:\n df['split'][i] = 'test'\n counter = 0\n # Присоединяем к номеру изображения расширение jpg\n df['file_name'] = df['file_name'] + '.jpg'\n\n return df.drop('train_num', 1)\n\n\nquality_control = train_test_split(quality_control, 0.7)\nquality_control.iloc[-1]['split'] = 'test'\nfiles_list = [name.lower() for name in os.listdir(Path().joinpath(temp_folder))]\n\nos.makedirs(Path().joinpath(temp_folder, 'dataset'))\nos.makedirs(Path().joinpath(temp_folder, 'dataset', 'train'))\nos.makedirs(Path().joinpath(temp_folder, 'dataset', 'val'))\nfor row in quality_control.iterrows():\n if row[1][0] in files_list:\n if row[1][2] == 'train':\n if os.path.exists(Path().joinpath(temp_folder, 'dataset', 'train', row[1][1])):\n copyfile(Path().joinpath(temp_folder, row[1][0]),\n Path().joinpath(temp_folder, 'dataset', 'train', row[1][1], row[1][0]))\n else:\n os.makedirs(Path().joinpath(temp_folder, 'dataset', 'train', row[1][1]))\n copyfile(Path().joinpath(temp_folder, row[1][0]),\n Path().joinpath(temp_folder, 'dataset', 'train', row[1][1], row[1][0]))\n if row[1][2] == 'test':\n if os.path.exists(Path().joinpath(temp_folder, 'dataset', 'val', row[1][1])):\n copyfile(Path().joinpath(temp_folder, row[1][0]),\n Path().joinpath(temp_folder, 'dataset', 'val', row[1][1], row[1][0]))\n else:\n os.makedirs(Path().joinpath(temp_folder, 'dataset', 'val', row[1][1]))\n copyfile(Path().joinpath(temp_folder, row[1][0]),\n Path().joinpath(temp_folder, 'dataset', 'val', row[1][1], row[1][0]))\n","sub_path":"scripts/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
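The row-by-row loop in `train_test_split` can also be expressed with a pandas `groupby`; a compact alternative sketch that marks the first `ceil(p*n)` files of each item as train (it skips the single-image duplication step above):

```python
import numpy as np
import pandas as pd

def mark_split(df, proportion=0.7):
    df = df.sort_values("item").reset_index(drop=True)

    def label(group):
        n_train = int(np.ceil(proportion * len(group)))
        tags = ["train"] * n_train + ["test"] * (len(group) - n_train)
        return pd.Series(tags, index=group.index)

    df["split"] = df.groupby("item", group_keys=False)["file_name"].apply(label)
    return df

demo = pd.DataFrame({"file_name": ["a", "b", "c", "d"], "item": ["x", "x", "x", "y"]})
print(mark_split(demo))
```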
+{"seq_id":"521988398","text":"import numpy as np\nfrom sklearn import linear_model\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import classification_report\nimport time\nimport sys\n\ndef loadX(filename_train_X):\n train_X = np.loadtxt(filename_train_X, dtype='int', delimiter=',')\n return train_X\n\ndef loady(filename_train_y):\n train_y = np.loadtxt(filename_train_y, dtype='int', delimiter=',')\n return train_y\n\ndef train(filename_model, train_X, train_y, filename_train_y_predict):\n start = time.time()\n print(start)\n\n clf = linear_model.LogisticRegression(C=0.01, class_weight='balanced')\n clf.fit(train_X, train_y)\n\n print(clf.coef_)\n print(clf.intercept_)\n #np.savetxt(filename_coef, clf.coef_, delimiter=',')\n #np.savetxt(filename_intercept, clf.intercept_, delimiter=',')\n\n joblib.dump(clf, filename_model)\n\n train_y_predict = clf.predict(train_X)\n print(mean_squared_error(train_y, train_y_predict))\n print(classification_report(train_y, train_y_predict))\n\n np.savetxt(filename_train_y_predict, train_y_predict, delimiter=',', fmt=\"%.0f\")\n\n end = time.time()\n print(end)\n\n print('training time :', str(end-start))\n\ndef main():\n filename_train_X = sys.argv[1]\n filename_train_y = sys.argv[2]\n filename_model = sys.argv[3]\n filename_train_y_predict = sys.argv[4]\n\n train_X = loadX(filename_train_X)\n train_y = loady(filename_train_y)\n\n train(filename_model, train_X, train_y, filename_train_y_predict)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"536077623","text":"# gof.py - Goodness-of-fit statistics\r\n# -----------------------------------\r\n# This file is a part of DeerLab. License is MIT (see LICENSE.md).\r\n# Copyright(c) 2019-2021: Luis Fabregas, Stefan Stoll and other contributors.\r\n\r\nimport numpy as np\r\nimport deerlab as dl\r\nimport warnings\r\n\r\ndef goodness_of_fit(x,xfit,Ndof,noiselvl):\r\n r\"\"\" \r\n Goodness of Fit statistics\r\n ==========================\r\n\r\n Computes multiple statistical indicators of goodness of fit.\r\n\r\n Usage:\r\n ------\r\n stats = goodness_of_fit(x,xfit,Ndof)\r\n\r\n Arguments: \r\n ----------\r\n x (N-element array)\r\n Original data\r\n xfit (N-element array)\r\n Fit\r\n Ndog (scalar, int)\r\n Number of degrees of freedom\r\n noiselvl (scalar)\r\n Standard dexiation of the noise in x.\r\n\r\n Returns:\r\n --------\r\n stats (dict)\r\n Statistical indicators:\r\n stats['chi2red'] - Reduced chi-squared\r\n stats['rmsd'] - Root mean-squared dexiation\r\n stats['R2'] - R-squared test\r\n stats['aic'] - Akaike information criterion\r\n stats['aicc'] - Corrected Akaike information criterion\r\n stats['bic'] - Bayesian information criterion\r\n\r\n \"\"\"\r\n sigma = noiselvl\r\n\r\n # Get number of xariables\r\n N = len(x)\r\n # Extrapolate number of parameters\r\n Q = Ndof - N\r\n\r\n # Reduced Chi-squared test\r\n chi2red = 1/Ndof*np.linalg.norm(x - xfit)**2/sigma**2\r\n\r\n # R-squared test\r\n R2 = 1 - np.sum((x-xfit)**2)/np.sum((xfit-np.mean(xfit))**2)\r\n\r\n # Root-mean square dexiation\r\n rmsd = np.sqrt(np.sum((x-xfit)**2)/N)\r\n\r\n # Log-likelihood\r\n loglike = N*np.log(np.linalg.norm(x - xfit)**2/N)\r\n\r\n # Akaike information criterion\r\n aic = loglike + 2*Q\r\n\r\n # Corrected Akaike information criterion\r\n aicc = loglike + 2*Q + 2*Q*(Q+1)/(N-Q-1)\r\n\r\n # Bayesian information criterion\r\n bic = loglike + Q*np.log(N)\r\n\r\n return {'chi2red':chi2red,'R2':R2,'rmsd':rmsd,'aic':aic,'aicc':aicc,'bic':bic}","sub_path":"deerlab/utils/gof.py","file_name":"gof.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"10674794","text":"#!/usr/bin/python\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n# Thomas Quintana \n\nimport os\n\nfrom argparse import ArgumentParser\n\nfrom jinja2 import Environment, FileSystemLoader\n\ndef main():\n parser = ArgumentParser(\n description=\"Renders a Jinja2 template using command line arguments or \" +\n \"environment variables as parameters for the template.\"\n )\n parser.add_argument(\n \"-s\", dest=\"source\", required=True,\n help=\"The path to the source template.\"\n )\n parser.add_argument(\n \"-d\", dest=\"destination\", required=True,\n help=\"The path to the destination file.\"\n )\n parser.add_argument(\n \"-ps\", dest=\"parameter_source\", choices=[\"environment\", \"cli\"], required=True,\n help=\"The source of the parameters for the template. The options \" +\n \"are 'environment' if you want to use the environment variables \" +\n \"as parameters or 'cli' if you want to pass in the parameters \" +\n \"as arguments to this script. Note: All parameter names are \" +\n \"converted to lower case irrelevant of the parameters source.\"\n )\n parser.add_argument(\n \"-p\", dest=\"parameters\", nargs=\"*\", required=False,\n help=\"A parameter to pass into the template renderer if the \" +\n \"parameters-source is 'cli'. (e.g. -p name=value)\"\n )\n args = parser.parse_args()\n # Load the parameters.\n parameters = None\n if args.parameter_source == \"cli\":\n parameters = args.parameters\n elif args.parameter_source == \"environment\":\n parameters = dict(os.environ)\n parameters = to_lowercase_keys(parameters)\n # Load the templates.\n folder_name, file_name = split_path(args.source)\n if folder_name is None:\n folder_name = os.getcwd()\n loader = FileSystemLoader(folder_name)\n templates = Environment(loader=loader)\n # Render the template.\n template = templates.get_template(file_name).render(parameters)\n # Write the rendered template to disk.\n with open(args.destination, \"wb\") as output:\n output.write(template)\n\ndef split_path(path):\n split_point = path.rfind(\"/\")\n if not split_point == -1:\n return path[:split_point], path[split_point + 1:]\n else:\n return None, path\n\ndef to_lowercase_keys(parameters):\n if isinstance(parameters, list):\n return {k.lower(): v for k, v in [p.split(\"=\") for p in parameters]}\n elif isinstance(parameters, dict):\n return {k.lower(): v for k, v in parameters.iteritems()}\n else:\n raise ValueError(\"Invalid type for parameters.\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"trt/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"439631518","text":"from django.conf.urls import patterns, url\n\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\n#user views\nurlpatterns = patterns('jobs.views.user_views',\n url(r'^login/$', 'login_view'),\n url(r'^block_user/(?P\\d+)/$', 'block_user'),\n url(r'^show_user/(?P\\d+)/$', 'show_user'),\n url(r'^write_note/(?P\\d+)/$', 'write_note'),\n url(r'^edit_profile/(?P\\d+)/$', 'edit_profile'),\n)\n\n#job application views\nurlpatterns += patterns('jobs.views.jobapplication_views',\n url(r'^apply_to_job/(?P\\d+)/$', 'apply_to_job'),\n url(r'^apply_cvs/(?P\\d+)/$', 'apply_cvs'),\n url(r'^show_applications_jobs/$', 'show_applications_for_jobs'),\n url(r'^show_applications_cvs/$', 'show_applications_for_cvs'),\n url(r'^update_jobapplication/(?P\\d+)/$', 'update_jobapplication'),\n)\n\n#job views\nurlpatterns += patterns('jobs.views.job_views',\n url(r'^$', 'list_recent_jobs'),\n url(r'^create_job/$', 'create_job'),\n url(r'^edit_job/(?P\\d+)/$', 'edit_job'),\n url(r'^delete_job/(?P\\d+)/$', 'delete_job'),\n url(r'^show_job/(?P\\d+)/$', 'show_job'),\n url(r'^job_list/$', 'show_own_jobs_list'),\n)\n\n#cv views\nurlpatterns += patterns('jobs.views.cv_views',\n url(r'^cvlist/$', 'show_own_cv_list'),\n url(r'^cv/(?P\\d+)/$', 'show_cv'),\n url(r'^create_cv/$', 'create_cv'),\n url(r'^edit_cv/(?P\\d+)/$', 'edit_cv'),\n url(r'^delete_cv/(?P\\d+)/$', 'delete_cv'),\n)\n\n\n","sub_path":"jobs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"108907594","text":"import sqlite3\n\n#创建链接\ncon = sqlite3.connect(\"test.db\")\n# print(con)\n\n#创建游标对象\ncur = con.cursor()\n\n#编写创建表的sql语句\nsql ='create table t_person(' \\\n 'pon INTEGER primary key autoincrement,' \\\n 'pname VARCHAR not null ,' \\\n 'age INTEGER ' \\\n ')'\n\ntry:\n #执行sql语句\n cur.execute(sql)\n print('创建成功')\nexcept Exception as e:\n print(e)\n print(\"创建失败\")\nfinally:\n #关闭游标\n cur.close()\n #关闭连接\n con.close()","sub_path":"数据库/sqlite3模块创建表.py","file_name":"sqlite3模块创建表.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"341132658","text":"\"\"\"Team Lookup logic.\"\"\"\nimport math\nimport urllib\nfrom urllib.request import urlopen, Request\n\nfrom bs4 import BeautifulSoup\n\nfrom steamtest import *\n\n\ndef extract_id_user(players):\n player_dict = {}\n for player in players:\n player = str(player)\n # print(player)\n split = player.find(\"href=\\\"\")\n\n steam_id = player[:split]\n steam_id = steam_id[\n steam_id.find(\"ID\") + 4: steam_id[1:].find(\"<\") - 2]\n\n user_name = player[split + 1:]\n user_name = user_name[user_name.find(\">\") + 1:user_name.find(\"<\")]\n\n # print(\"steamid is\" , steam_id)\n # print(\"username is\", user_name)\n # TODO write dotabuff to file maybe\n dotabuff_link = \"https://dotabuff.com/players/\" + str(\n convert_text_to_32id(steam_id))\n opendota_link = \"https://www.opendota.com/players/\" + str(\n convert_text_to_32id(steam_id))\n # print(\"Dota Buff is \", dotabuff_link)\n # print(\"Open Dota is \", opendota_link)\n player_dict[user_name] = {}\n player_dict[user_name][\"steam_id\"] = steam_id\n player_dict[user_name][\"dotabuff_link\"] = dotabuff_link\n player_dict[user_name][\"opendota_link\"] = opendota_link\n\n return player_dict\n\n\ndef query_opendota_api(player_dict):\n for username in player_dict:\n player = player_dict[username]\n steam_32id = convert_text_to_32id(player[\"steam_id\"])\n solommr, mmr_estimate, rank_number, leaderboard_rank = get_account_info(\n steam_32id)\n player[\"solommr\"] = solommr\n player[\"mmr_estimate\"] = mmr_estimate\n player[\"rank\"] = \"\"\n player[\"stars\"] = \"\"\n player[\"heroes\"] = get_account_heroes(steam_32id)\n if rank_number == None:\n player[\"badge\"] = \"Unranked\"\n elif leaderboard_rank != None:\n player[\"rank\"] = leaderboard_rank\n player[\"badge\"] = \"Immortal\"\n else:\n badges = [\"Herald\", \"Guardian\", \"Crusader\", \"Archon\", \"Legend\",\n \"Ancient\", \"Divine\"]\n player[\"badge\"] = badges[math.floor(rank_number / 10) - 1];\n player[\"stars\"] = rank_number % 10;\n\n player_dict[username] = player\n\n return player_dict\n\n\ndef print_player_info(player_dict):\n for username in player_dict:\n player = player_dict[username]\n\n if player[\"rank\"]:\n print(username, \": Rank \", player[\"rank\"])\n else:\n print(username, \": \", player[\"badge\"], \" \", player[\"stars\"])\n\n print(\"SOLO MMR: \", player[\"solommr\"], \" MMR ESTIMATE: \",\n player[\"mmr_estimate\"])\n print(player[\"dotabuff_link\"])\n print(player[\"opendota_link\"])\n print()\n\n return\n\n\ndef player_info_to_string(player_dict, team_name):\n return_strings = [team_name + \"\\n\\n\"]\n for username, player in player_dict.items():\n return_string = \"\"\n return_string += \"CSL USERNAME IS: \" + str(\n username or \"\") + \"\\n\"\n return_string += \"SOLO MMR: \" + str(\n player[\"solommr\"] or \"\") + \" MMR ESTIMATE: \" + str(\n player[\"mmr_estimate\"] or \"\") + \"\\n\"\n return_string += \"RANK TIER: \" + str(\n player[\"badge\"] or \"\") + \" \" + str(player[\"stars\"]) + str(\n player[\"rank\"] or \"\") + \"\\n\"\n\n return_string += str(\n player[\"dotabuff_link\"] or \"\") + \"\\n\"\n return_string += str(\n player[\"opendota_link\"] or \"\") + \"\\n\"\n\n return_string += \"5 MOST PLAYED HEROES (last 100 games):\\n\"\n for hero in player[\"heroes\"]:\n return_string += hero['loc_name'] + \":\\t\"\n return_string += str(hero['games']) + \" games, \"\n return_string += f\"{100 * hero['winrate']:.2f}\" + \"% winrate\\n\"\n return_string += \"\\n\"\n return_strings.append(return_string)\n return 
return_strings\n\n\ndef extract_team_id(team_banner_div):\n # TODO: Don't do this with brute force\n team_banner_div = str(team_banner_div[0])\n team_banner_div = team_banner_div[\n team_banner_div.find(\"h3\") + 3:team_banner_div.find(\n \"</h3>\")]\n team_banner_div = team_banner_div[\n team_banner_div.find(\">\") + 1:team_banner_div.find(\n \"</h3>\")]\n return team_banner_div\n\n\ndef look_up_team(team_id):\n url = \"https://cstarleague.com/dota2/teams/\"\n url = url + str(team_id)\n print(\"Looking up URL \" + url)\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n try:\n request = Request(url, headers=headers)\n content = urlopen(request).read()\n soup = BeautifulSoup(content, 'lxml')\n\n team_name_div = soup.findAll(\"div\", {\"class\": \"hero-title\"})\n team_name = extract_team_id(team_name_div)\n\n # print(team_name)\n\n players = soup.findAll(\"span\", {\"class\": \"tool-tip\"})\n\n player_dict = extract_id_user(players)\n\n player_dict = query_opendota_api(player_dict)\n\n # print_player_info(player_dict)\n return player_info_to_string(player_dict, team_name)\n except urllib.error.HTTPError as e:\n print(\"404 error\")\n return \"\"\n\n\nif __name__ == \"__main__\":\n # michigan\n team_id = 839\n\n print(look_up_team(team_id))\n print(look_up_team(7542))\n","sub_path":"teamlookup.py","file_name":"teamlookup.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"546071314","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport pytest\n\nfrom fuel_ccp_tests import logger\nfrom fuel_ccp_tests import settings\nfrom fuel_ccp_tests.helpers import post_os_deploy_checks\n\nLOG = logger.logger\n\n\nclass TestPreCommitMariadb(object):\n \"\"\"docstring for TestPreCommitMariadb\n \"\"\"\n\n @pytest.mark.test_mariadb_on_commit\n @pytest.mark.revert_snapshot(settings.PRECOMMIT_SNAPSHOT_NAME)\n def test_deploy_os_with_custom_mariadb(\n self, ccpcluster, k8s_actions, config, underlay, namespace='ccp'):\n \"\"\"Precommit test for mariadb\n\n Scenario:\n 1. Install k8s\n 2. Install fuel-ccp\n 3. Fetch all repositories\n 4. Fetch mariadb from review\n 6. Build containers\n 7. Deploy openstack\n 8. Check db\n \"\"\"\n\n if settings.REGISTRY == '127.0.0.1:31500':\n k8s_actions.create_registry()\n ccpcluster.fetch()\n ccpcluster.update_service('mariadb')\n ccpcluster.build(suppress_output=False)\n\n ccpcluster.deploy()\n\n post_os_deploy_checks.check_jobs_status(k8s_actions.api, timeout=2500)\n post_os_deploy_checks.check_pods_status(k8s_actions.api)\n\n LOG.info(\"Getting pod id\")\n cluster = k8s_actions.api\n cluster_list = cluster.pods.list(namespace=namespace)\n pod_id = [pod.name for pod in cluster_list if 'maria' in pod.name][0]\n LOG.info(\"Pod ID is {0}\".format(pod_id))\n\n cmd = \"mysql -uroot -ppassword -s -e 'SHOW DATABASES;'\"\n pod_exec = \\\n \"kubectl exec -i {pod_id} --namespace=ccp -- {cmd}\".format(\n pod_id=pod_id, cmd=cmd)\n result = underlay.check_call(pod_exec, host=config.k8s.kube_host)\n base_databases = ['nova', 'keystone', 'neutron']\n result = [elem.rstrip() for elem in result['stdout']]\n\n assert set(base_databases).issubset(set(result)), \\\n \"Mariadb does not contain minimal set of databases, \" \\\n \"cirrent set is {0}\".format(set(result))\n","sub_path":"fuel_ccp_tests/tests/system/pre_commit/test_mariadb.py","file_name":"test_mariadb.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"636055124","text":"# Importando as bibliotecas para importação e exportação de dados, realização de operações matemáticas, plotação gráfica de resultados e criação da GUI:\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.integrate import odeint\r\nfrom tkinter import *\r\nimport tkinter as tk\r\n\r\n##Para o cálculo do produto-atribuir uma varipavel p\r\n###0-associado ao crescimento;1-não associado ao crescimento;2-parcialmente associado ao crescimento\r\n\r\n# Criação da janela para a geração da interface através da qual o usuário poderá selecionar os resultados que deseja acessar:\r\nwindow = Tk()\r\nwindow.title(\"Simulação Matemática de Processos Fermentativos Microbianos\")\r\nwindow.geometry('1070x400')\r\n\r\nLabel_title_1= Label (window, text = \"Fermentação Microbiana em Batelada\")\r\nLabel_title_1[\"font\"] = (\"Times\", \"14\", \"bold\")\r\nLabel_title_1[\"fg\"] = (\"black\")\r\nLabel_title_1.grid(column=0, row=0)\r\n\r\n#Importando os dados das constantes cinéticas e concentrações diretamente da planilha do Excel, pelo pacote Pandas:\r\n##O input dos dados é feito diretamente através da planilha do Excel determinada pelo nome do documento salvo:\r\nimportado = pd.read_excel(\"Input_batelada_processos_fermentativos_ajuste.xlsx\",\"Operação_batelada\")\r\n##Linha de comando que permite a conversão dos dados numéricos extraídos das células do Excel para o formato de vetores Numpy:\r\nimportado_np = importado.values\r\n##Concentração celular inicial:\r\nCx0 = importado_np[7,0]\r\nprint(\"\\nCx0:\\n\",Cx0)\r\n##Concentração inicial de substrato fornecido às celulas inicial:\r\nCs0 = importado_np[7,1]\r\nprint(\"\\nCs0:\\n\",Cs0)\r\n##Concentração de produto gerado inicial:\r\nCp0 = importado_np[7,2]\r\nprint(\"\\nCp0:\\n\",Cp0)\r\n##Início do cultivo microbiano em batelada:\r\nT0 = importado_np[7,3]\r\nprint(\"\\nT0:\\n\",T0)\r\n##Término do cultivo microbiano em batelada:\r\nTf = importado_np[7,4]\r\nprint(\"\\nTf:\\n\",Tf)\r\n##Intervalo de tempo (passo), entre o tempo total de cultivo em batelada,em que o mesmo será analisado matematicamente:\r\nIntervalo = importado_np[7,5]\r\nprint(\"\\nIntervalo:\\n\",Intervalo)\r\n##Entrada inicial aproximada para a taxa específica de crescimento:\r\nmimaximo = importado_np[14,0]\r\nprint(\"\\nmimáx:\\n\",mimaximo)\r\n##Entrada inicial aproximada para a constante Ks:\r\nKs = importado_np[14,1]\r\nprint(\"\\nKs:\\n\",Ks)\r\n##Entrada inicial aproximada para o coeficiente de transferência de substrato para o crescimento celular Yx/s:\r\nYxs = importado_np[14,2]\r\nprint(\"\\nYxs:\\n\",Yxs)\r\n##Entrada inicial aproximada para a constante alfa associada à formação de produto pelas células durante a fermentação:\r\nalfa = importado_np[14,3]\r\nprint(\"\\nalfa:\\n\",alfa)\r\n##Entrada inicial aproximada para a constante beta associada à formação de produto pelas células durante a fermentação:\r\nbeta = importado_np[14,4]\r\nprint(\"\\nbeta:\\n\",beta)\r\n##Variação na concentração de células observada experimentalmente durante o decorrer do tempo total de cultivo em estudo:\r\nCxexp= importado_np[:,18]\r\nprint(\"\\nCxexp:\\n\",Cxexp)\r\n##Variação na concentração de substrato observada experimentalmente durante o decorrer do tempo total de cultivo em estudo:\r\nCsexp = importado_np[:,19]\r\nprint(\"\\nCsexp:\\n\",Csexp)\r\n##Variação na concentração de produto formado observada experimentalmente durante o decorrer do tempo total de cultivo em estudo:\r\nCpexp = 
importado_np[:,20]\r\nprint(\"\\nCpexp:\\n\", Cpexp)\r\n\r\np = importado_np[18,7]\r\nprint(\"\\np:\\n\",p)\r\n\r\n#Definindo as equações diferenciais que descrevem a variação da concentração celular, de substrato e produto em função do tempo:\r\ndef modeloscrescimento (Concent,t, *args):\r\n Cx,Cs,Cp=Concent\r\n##Criação de uma matriz \"args\" com cada argumento atribuído a uma constante que será ajustada pelo algoritmo, com cada número dentro dos colchetes referentes à coluna da mesma: \r\n mimaximo = args[0]\r\n Ks = args[1]\r\n Yxs = args[3]\r\n##Definição das equações diferencias ordinárias que descrevem a variação das concentrações com o tempo, bem como o modelo de Monod: \r\n mi=mimaximo*(Cs/(Ks+Cs))\r\n dCxdt=mi*Cx\r\n dCsdt=(-1/Yxs)*mi*Cx\r\n##Criação do condicional para a definição do comportamento de variação de Cp (associado, não associado ou parcialmente associado ao crescimento celular):\r\n if p==0:\r\n alfa = args[2]\r\n dCpdt=alfa*mi*Cx\r\n elif p==1:\r\n beta = args[4]\r\n dCpdt=beta*Cx \r\n else:\r\n alfa = args[2]\r\n beta = args[4]\r\n dCpdt=alfa*mi*Cx+beta*Cx\r\n return(dCxdt,dCsdt,dCpdt)\r\n \r\n#Lista criada para armazenar os valoes atribuídos às variáveis (constantes) cinéticas microbianas de acordo com os dados aproximados fornecidos:\r\nmm_params = (mimaximo, Ks, alfa, Yxs, beta)\r\nprint(mm_params)\r\n\r\n#Definindo as condições iniciais para permitir a integração numérica computacional das EDOs definidas no modelo \"modeloscrescimento\", seguindo a cinética de Monod:\r\n##Lista que contém os valores iniciais de concentração celular, de substrato de produto:\r\ninit_concent=[Cx0,Cs0,Cp0]\r\n##Vetor tempo, do tipo arange, a partir do pacote numérico Numpy:\r\nt=np.arange(T0,Tf,Intervalo)\r\n##Atribuição de pesos para cada uma das concentrações consideradas no ajuste matemático (quanto maior o valor do coeficiente, menos relevante para o ajuste - tendência do algorismo a se aproximar mais de uma das variáveis de concentração):\r\ndpC=[1,1,1]\r\n##Comando para integrar numericamente as EDOs definidas pelo modelo 'modeloscrescimento':\r\nConcent=odeint(modeloscrescimento,init_concent,t,args=(mm_params))\r\nprint(\"\\nConcentrações:\\n\",Concent)\r\nprint(t)\r\n\r\n#Separando, a partir da matriz Concent, os vetores Cx, Cs e Cp:\r\n##Matriz concentração celular com os valores calculados através da solução do modelo de EDO por integração numérica computacional:\r\nCx=Concent[:,0]\r\n#print(\"Cx:\", Cx)\r\n##Matriz concentração de substrato com os valores calculados através da solução do modelo de EDO por integração numérica computacional:\r\nCs=Concent[:,1]\r\n#print(\"Cs:\", Cs)\r\n##Matriz concentração de produto com os valores calculados através da solução do modelo de EDO por integração numérica computacional:\r\nCp=Concent[:,2]\r\n#print(\"Cp:\", Cp)\r\n\r\n#Criando os vetores com os valores experimentais fornecidos pelo usuário (extraídos da planilha do Excel) para as variáveis concentração e tempo para efeitos de ajuste matemático aos modelos cinéticos:\r\n##Vetor com os valores de concentração celular experimentais:\r\nCxexp= importado_np[:,18]\r\n##Vetor com os valores de concentração de substrato experimentais:\r\nCsexp = importado_np[:,19]\r\n##Vetor com os valores de concentração de produto experimentais:\r\nCpexp = importado_np[:,20]\r\n##Vetor array com os intervalos de tempo analisados durante o período total de cultivo em batelada:\r\nT=np.array([0,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10,10.5])\r\n#Criação de uma matriz 
tridimensional preenchida com zeros (contendo colunas para Cx, Cs e Cp), com a mesma dimensão do vetor tempo experimental:\r\nexp_C=np.zeros((len(t),3))\r\n##Substituição da coluna zero da matriz anterior (primeira coluna) com os valores de entrada de concentração celular experimentais (extraídos do Excel):\r\nexp_C[:,0]=Cxexp\r\n##Substituição da coluna um da matriz anterior (segunda coluna) com os valores de entrada de concentração de substrato experimentais (extraídos do Excel):\r\nexp_C[:,1]=Csexp\r\n##Substituição da coluna dois da matriz anterior (terceira coluna) com os valores de entrada de concentração de produto experimentais (extraídos do Excel):\r\nexp_C[:,2]=Cpexp\r\n#Cexp=Cxexp,Csexp\r\nprint(exp_C)\r\n\r\n#Criação da Função Objetivo da simulação:\r\ndef residuals(p):\r\n p=tuple(p)\r\n #print(\"p\",p)\r\n##Calculando os valores simulados, através dos modelos matemáticos de EDOs (resolvidas por integração numérica computacional),para Cx, Cs e Cp, utilizando os argumentos guardados na tupla: \r\n sim_P=odeint(modeloscrescimento,init_concent,t,args=p)\r\n##Determinado o erro entre os valores experimentais e calculados pelo método acima para os valores de Cx, Cs e Cp:\r\n res=sim_P-exp_C\r\n##Para cada uma das três colunas da matriz contendo os valores experimentais, o erro calculado é dividido pelo peso atribuído a Cx, Cs e Cp:\r\n for i in range(0,3):\r\n res[:,i]=res[:,i]/dpC[i]\r\n##O que a função retorna é uma matriz unidimensional, pelo uso da função flatten:\r\n return res.flatten()\r\n\r\n#Minimizando a função objetiva pela função leastsq:\r\n##Importando o pacote necessário à minimização do erro (residual):\r\nfrom scipy.optimize import leastsq\r\n##A lista 'initial guess' contém os valores de entrada, portanto, aproximados, para as constantes cinéticas de crescimento microbiano extraídas diretamente da planilha do Excel:\r\ninitial_guess = [mimaximo,Ks,Yxs,alfa,beta]\r\n##A função leastsq, a partir da solução da função residuals e dos valores de entrada aproximados anteriores irá definir qual o valor dessas mesmas constantes que minimiza o erro observado entre os valores simulados e modelo para Cx, Cs e Cp:\r\nfitted_paramers=leastsq(residuals,initial_guess)[0]\r\nprint(fitted_paramers)\r\n##As constantes, já com valores otimizados, são armazenadas em uma tupla (lista imutável):\r\nfitted_paramers=tuple(fitted_paramers)\r\n##Agora, a função modeloscrescimento, que contém as EDOs que descrevem o comportamento, em função do tempo, de Cx, Cs e Cp é novamente integrada numericamente utilizando as constantes cinéticas (argumentos) já otimizados (convergidas):\r\nCotim=odeint(modeloscrescimento,init_concent,t,args=(fitted_paramers))\r\nprint(Cotim)\r\n\r\n#Utilizando o pacote pyplot, disponível na bibliotea matplotlib, para geração gráfica dos valores de concentração celular, de substrato e de produto obtidos ao fim da integração numérica computacional (valores estimados pelo modelo matemático):\r\ndef printSomething_grafico():\r\n SMALL_SIZE = 14 \r\n MEDIUM_SIZE = 20 \r\n BIGGER_SIZE = 20 \r\n\r\n##Comando para determinar o tamanho segundo o qual os textos grafados no gráfico serão impressos na tela, de acordo com as dimensões indicadas anteriormente:\r\n plt.rc('font', size=SMALL_SIZE) \r\n plt.rc('axes', titlesize=SMALL_SIZE) \r\n plt.rc('axes', labelsize=MEDIUM_SIZE) \r\n plt.rc('xtick', labelsize=SMALL_SIZE) \r\n plt.rc('ytick', labelsize=SMALL_SIZE) \r\n plt.rc('legend', fontsize=SMALL_SIZE) \r\n plt.rc('figure', titlesize=BIGGER_SIZE) \r\n\r\n##Algoritmo para plotar 
e imprimir os dados numéricos graficamente:\r\n#Definindo a figura que será gerada:\r\n f = plt.figure() \r\n#Definindo a criação de um gráfico 1x1 com um eixo secundário:\r\n ax = f.add_subplot(111) \r\n#Definindo os vetores tempo e concentração que serão plotados nos eixos principais, bem como a esperrura e cor das linhas dos modelos matemáticos integrados numericamente: \r\n lns1 = ax.plot(t,Cotim[:,0],'fuchsia',linewidth=4,label='Cx modelo') \r\n lns2 = ax.plot(t,Cotim[:,1],'lime',linewidth=4,label='Cs modelo') \r\n#Definição do eixo secundário em si, pelo uso da função twinx:\r\n ax2 = ax.twinx()\r\n#Definindo os vetores tempo e concentração que serão plotados no eixo secundário, bem como a esperrura e cor das linhas dos modelos matemáticos integrados numericamente:\r\n lns3 = ax2.plot(t,Cotim[:,2],'sienna',linewidth=4,label='Cp modelo') \r\n lns4 = ax.plot(T,Cxexp,'o',color='orchid',markeredgecolor='black',markersize=10,label='Cx experimental') \r\n lns5 = ax.plot(T,Csexp,'o',color='lightgreen',markeredgecolor='black',markersize=10,label='Cs experimental')\r\n lns6 = ax2.plot(T,Cpexp,'o',color='tan',markeredgecolor='black',markersize=10,label='Cp experimental') \r\n#Definição das legendas de casa um dos eixos (x e y - principal e secundário):\r\n ax.set_xlabel('Tempo de cultivo (h)',weight='bold') \r\n ax.set_ylabel('Cx, Cs (g/L)', weight='bold')\r\n ax2.set_ylabel('Cp (g/L)', weight='bold') \r\n # added these three lines\r\n#Linha de comando que permite a geração da legenda completa, com todas os modelos e dados experimentais graficados:\r\n lns = lns1+lns2+lns3+lns4+lns5+lns6\r\n labs = [l.get_label() for l in lns]\r\n ax.legend(lns, labs, loc=0)\r\n#Comando para a criação da grade de fundo do gráfico gerado: \r\n ax.grid(True) \r\n#Definição das dimensões (comprimento e largura) da figura gerada: \r\n f.set_figheight(5) \r\n f.set_figwidth(8) \r\n#Comando para a determinação da cor de fundo da área de plotagem do gráfico: \r\n f.patch.set_facecolor('white') \r\n#Comando para a definição do template disponível na biblioteca matplotlib para compor a estética da figura: \r\n plt.style.use('default') \r\n#Comando para o output do gráfico obtido:\r\n plt.show() \r\n\r\n#Selecionando a coluna referente à concentração celular otimizada a partir da matriz gerad após a integração numérica:\r\nCxot=Cotim[:,0]\r\n#Equação que permite calcular a produtividade celular (Px):\r\nPx=Cxot/t\r\nprint(\"Produtividade celular:\",Px)\r\n#Selecionando a coluna referente à concentração de produto gerado otimizada a partir da matriz gerad após a integração numérica:\r\nCpot=Cotim[:,2]\r\n#Equação que permite calcular a produtividade celular (Pp):\r\nPp=Cpot/t \r\nprint(\"Produtividade produto:\",Pp)\r\n#Determinando a relação gráfica entre a produtividade celular, produtividade do produto Pp e o tempo do cultivo simulado:\r\n#Utilizando o pacote pyplot, disponível na bibliotea matplotlib, para geração gráfica dos valores de concentração celular, de substrato e de produto obtidos ao fim da integração numérica computacional (valores estimados pelo modelo matemático):\r\ndef grafico_produtividade():\r\n SMALL_SIZE = 14 \r\n MEDIUM_SIZE = 20 \r\n BIGGER_SIZE = 20 \r\n\r\n##Comando para determinar o tamanho segundo o qual os textos grafados no gráfico serão impressos na tela, de acordo com as dimensões indicadas anteriormente:\r\n plt.rc('font', size=SMALL_SIZE) \r\n plt.rc('axes', titlesize=SMALL_SIZE) \r\n plt.rc('axes', labelsize=MEDIUM_SIZE) \r\n plt.rc('xtick', labelsize=SMALL_SIZE) \r\n plt.rc('ytick', 
labelsize=SMALL_SIZE) \r\n plt.rc('legend', fontsize=SMALL_SIZE) \r\n plt.rc('figure', titlesize=BIGGER_SIZE) \r\n\r\n##Algoritmo para plotar e imprimir os dados numéricos graficamente:\r\n#Definindo a figura que será gerada:\r\n f = plt.figure() \r\n#Definindo a criação de um gráfico 1x1 com um eixo secundário:\r\n ax = f.add_subplot(111) \r\n#Definindo os vetores tempo e concentração que serão plotados nos eixos principais, bem como a esperrura e cor das linhas dos modelos matemáticos integrados numericamente: \r\n lns1 = ax.plot(t,Px,'fuchsia',linewidth=4,label='Produtividade celular') \r\n#Definição do eixo secundário em si, pelo uso da função twinx:\r\n ax2 = ax.twinx()\r\n#Definindo os vetores tempo e concentração que serão plotados no eixo secundário, bem como a esperrura e cor das linhas dos modelos matemáticos integrados numericamente:\r\n lns2 = ax2.plot(t,Pp,'lime',linewidth=4,label='Produtividde metabólito') \r\n#Definição das legendas de casa um dos eixos (x e y - principal e secundário):\r\n ax.set_xlabel('Tempo de cultivo (h)',weight='bold') \r\n ax.set_ylabel('Px(g/L.s)', weight='bold')\r\n ax2.set_ylabel('Pp(g/L.s)', weight='bold') \r\n # added these three lines\r\n#Linha de comando que permite a geração da legenda completa, com todas os modelos e dados experimentais graficados:\r\n lns = lns1+lns2\r\n labs = [l.get_label() for l in lns]\r\n ax.legend(lns, labs, loc=0)\r\n#Comando para a criação da grade de fundo do gráfico gerado: \r\n ax.grid(True) \r\n#Definição das dimensões (comprimento e largura) da figura gerada: \r\n f.set_figheight(5) \r\n f.set_figwidth(8) \r\n#Comando para a determinação da cor de fundo da área de plotagem do gráfico: \r\n f.patch.set_facecolor('white') \r\n#Comando para a definição do template disponível na biblioteca matplotlib para compor a estética da figura: \r\n plt.style.use('default') \r\n#Comando para o output do gráfico obtido:\r\n plt.show() \r\n\r\n#Equação que permite calcular a produtividade celular específica (Ppx):\r\nPpx=Cpot*(1/Cxot)\r\nprint(\"Produtividade específica:\",Ppx)\r\n#Determinando a relação gráfica entre a produtividade celular específica (Ppx) e o tempo do cultivo simulado:\r\ndef grafico_produtividade_especifica():\r\n SMALL_SIZE = 14 \r\n MEDIUM_SIZE = 20 \r\n BIGGER_SIZE = 20 \r\n\r\n## Comando para determinar o tamanho segundo o qual os textos encontrados no gráfico serão impressos na tela:\r\n plt.rc('font', size=SMALL_SIZE) \r\n plt.rc('axes', titlesize=SMALL_SIZE) \r\n plt.rc('axes', labelsize=MEDIUM_SIZE) \r\n plt.rc('xtick', labelsize=SMALL_SIZE) \r\n plt.rc('ytick', labelsize=SMALL_SIZE) \r\n plt.rc('legend', fontsize=SMALL_SIZE) \r\n plt.rc('figure', titlesize=BIGGER_SIZE) \r\n\r\n## Algoritmo para plotar e imprimir os dados numéricos graficamente:\r\n#Definindo a figura que irá ser gerada:\r\n f = plt.figure() \r\n#Definindo os vetores tempo e concentração celular que irão ser plotados, referente a cada uma das taxas de crescimento analisadas, bem como a cor e espessura da linha graficada: \r\n _ = plt.plot(t,Ppx,'coral',linewidth=4) \r\n#Definição do título dos eixos x e y, assim como da formatação do texto a ser utilizada (negrito):\r\n _ = plt.xlabel('Tempo de cultivo (h)',weight='bold') \r\n _ = plt.ylabel('Produtividade celular específica', weight='bold') \r\n#Comando que habilita a presença da linha de grade ao fundo da área de plotagem:\r\n _ = plt.grid(True)\r\n#Definição das dimensões de comprimento e largura da figura gerada: \r\n f.set_figheight(9) \r\n f.set_figwidth(14) \r\n#Definição da 
cor de fundo da área de plotagem do gráfico: \r\n f.patch.set_facecolor('white') \r\n#Definição do template, disponibilizado pelo pacote matplotlib, para compor a estética da figura: \r\n plt.style.use('default') \r\n#Comando para o output do gráfico obtido: \r\n plt.show() \r\n#plt.plot(t,Px)\r\n#plt.show()\r\n\r\n#plt.plot(t,Pp)\r\n#plt.show() \r\n\r\n#plt.plot(t,Ppx)\r\n#plt.show() \r\n\r\nmimaxotimo=fitted_paramers[0]\r\nKsotimo=fitted_paramers[1]\r\nmiot=mimaxotimo*(Cs/(Ksotimo+Cs))\r\nprint(\"mi:\",miot)\r\n#Determinando a relação gráfica entre a taxa específica de crescimento e o tempo do cultivo simulado:\r\ndef grafico_mis():\r\n SMALL_SIZE = 14 \r\n MEDIUM_SIZE = 20 \r\n BIGGER_SIZE = 20 \r\n\r\n## Comando para determinar o tamanho segundo o qual os textos encontrados no gráfico serão impressos na tela:\r\n plt.rc('font', size=SMALL_SIZE) \r\n plt.rc('axes', titlesize=SMALL_SIZE) \r\n plt.rc('axes', labelsize=MEDIUM_SIZE) \r\n plt.rc('xtick', labelsize=SMALL_SIZE) \r\n plt.rc('ytick', labelsize=SMALL_SIZE) \r\n plt.rc('legend', fontsize=SMALL_SIZE) \r\n plt.rc('figure', titlesize=BIGGER_SIZE) \r\n\r\n## Algoritmo para plotar e imprimir os dados numéricos graficamente:\r\n#Definindo a figura que irá ser gerada:\r\n f = plt.figure() \r\n#Definindo os vetores tempo e concentração celular que irão ser plotados, referente a cada uma das taxas de crescimento analisadas, bem como a cor e espessura da linha graficada: \r\n _ = plt.plot(t,miot,'coral',linewidth=4) \r\n#Definição do título dos eixos x e y, assim como da formatação do texto a ser utilizada (negrito):\r\n _ = plt.xlabel('Tempo de cultivo (h)',weight='bold') \r\n _ = plt.ylabel('Taxa específica de crescimento $\\mu= (h^{-1}$)', weight='bold') \r\n#Comando que habilita a presença da linha de grade ao fundo da área de plotagem:\r\n _ = plt.grid(True)\r\n#Definição das dimensões de comprimento e largura da figura gerada: \r\n f.set_figheight(9) \r\n f.set_figwidth(14) \r\n#Definição da cor de fundo da área de plotagem do gráfico: \r\n f.patch.set_facecolor('white') \r\n#Definição do template, disponibilizado pelo pacote matplotlib, para compor a estética da figura: \r\n plt.style.use('default') \r\n#Comando para o output do gráfico obtido: \r\n plt.show() \r\n#Criando a função que permite a geração do output dos valores de concentração (Cx, Cs e Cp) gerados por integração numérica computacional:\r\ndf_concents= pd.DataFrame({'Tempo(h)': t, 'Cx(g/L)': Cx, 'Cs(g/L)': Cs, 'Cp(g/L)': Cp})\r\n##O pacote Pandas permite escrever, em Python, em uma planilha Excel, os dados de Cx, Cs e Cp calculados após integração pelo algoritmo por meio da função abaixo:\r\ndef printSomething_writer_concents(): \t\r\n with pd.ExcelWriter('Output_batelada_processos_fermentativos.xlsx') as writer:\r\n df_concents.to_excel(writer, sheet_name=\"Output_concent\")\r\n##O documento, em xlsx, é salvo com o mesmo nome indicado entre aspas na função anterior:\r\n writer.save()\r\n\r\n#Utilizando o pacote pyplot, disponível na bibliotea matplotlib, para geração gráfica dos valores de concentração celular, de substrato e de produto obtidos ao fim da integração numérica computacional (valores estimados pelo modelo matemático):\r\n \r\n\r\n#plt.plot(t,miot)\r\n#plt.show() \r\n \r\n #9x14\r\n \r\nLabel_title_3= Label (window, text = \"Resultados gerados em planilha Excel:\")\r\nLabel_title_3[\"font\"] = (\"Times\", \"12\", \"italic\", \"bold\")\r\nLabel_title_3[\"fg\"] = (\"black\")\r\nLabel_title_3.grid(column=0, row=1)\r\n\r\n#Construção do botão que irá aparecer 
na interface criada, permitindo o acesso aos dados de concentração celular, de substrato e de produto calculadas via integração numérica computacional dos modelos de EDOs:\r\nButton_concent= Button(window, text=\"Resultados Concentração - Integração Numérica\", command=printSomething_writer_concents) \r\nButton_concent[\"font\"] = (\"Times\", \"11\", \"bold\")\r\nButton_concent[\"width\"]= 35\r\nButton_concent[\"bg\"]= \"lightgreen\"\r\nButton_concent.grid(row=2, column=0, pady=10)\r\n\r\nLabel_title_4= Label (window, text = \"Resultados gráficos gerados em Python:\")\r\nLabel_title_4[\"font\"] = (\"Times\", \"12\", \"italic\", \"bold\")\r\nLabel_title_4[\"fg\"] = (\"black\")\r\nLabel_title_4.grid(column=0, row=3)\r\n\r\n#Construção do botão que irá aparecer na interface criada, permitindo o acesso ao gráfico gerado:\r\nButton_grafico_concentracao = Button(window, text=\"Gráfico Perfis de Concentração\", command=printSomething_grafico) \r\nButton_grafico_concentracao[\"font\"] = (\"Times\", \"11\", \"bold\")\r\nButton_grafico_concentracao[\"width\"]= 28\r\nButton_grafico_concentracao[\"bg\"]= \"lightpink\"\r\nButton_grafico_concentracao.grid(row=4, column=0, pady=10)\r\n\r\n#Construção do botão que irá aparecer na interface criada, permitindo o acesso ao gráfico gerado:\r\nButton_grafico_produtividade = Button(window, text=\"Gráfico Produtividade Celular/Metabólito\", command=grafico_produtividade) \r\nButton_grafico_produtividade[\"font\"] = (\"Times\", \"11\", \"bold\")\r\nButton_grafico_produtividade[\"width\"]= 30\r\nButton_grafico_produtividade[\"bg\"]= \"lightpink\"\r\nButton_grafico_produtividade.grid(row=4, column=1, pady=10)\r\n\r\n#Construção do botão que irá aparecer na interface criada, permitindo o acesso ao gráfico gerado:\r\nButton_grafico_especifica = Button(window, text=\"Gráfico Produtividade Específica\", command=grafico_produtividade_especifica) \r\nButton_grafico_especifica[\"font\"] = (\"Times\", \"11\", \"bold\")\r\nButton_grafico_especifica[\"width\"]= 28\r\nButton_grafico_especifica[\"bg\"]= \"lightpink\"\r\nButton_grafico_especifica.grid(row=5, column=0, pady=10)\r\n\r\n#Construção do botão que irá aparecer na interface criada, permitindo o acesso ao gráfico gerado:\r\nButton_grafico_mis = Button(window, text=\"Gráfico Taxa Específica de Crescimento\", command=grafico_mis) \r\nButton_grafico_mis[\"font\"] = (\"Times\", \"11\", \"bold\")\r\nButton_grafico_mis[\"width\"]= 30\r\nButton_grafico_mis[\"bg\"]= \"lightpink\"\r\nButton_grafico_mis.grid(row=5, column=1, pady=10)\r\ntk.mainloop()\r\n","sub_path":"Github_versao_final_ajuste_batelada_monod_Levenber_Maquardt.py","file_name":"Github_versao_final_ajuste_batelada_monod_Levenber_Maquardt.py","file_ext":"py","file_size_in_byte":25286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"572023994","text":"# coding: utf-8\n'''\n------------------------------------------------------------------------------\n Copyright 2016-2017 Esri\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n------------------------------------------------------------------------------\n ==================================================\n RadialLineOfSightAndRangeTestCase.py\n --------------------------------------------------\n requirements: ArcGIS 10.3+, Python 2.7\n author: ArcGIS Solutions\n contact: support@esri.com\n company: Esri\n ==================================================\n description:\n Unit tests for Visibility tools\n ==================================================\n'''\n\n# IMPORTS ==========================================\nimport os\nimport sys\nimport traceback\nimport unittest\n\nimport arcpy\nimport UnitTestUtilities\nimport Configuration\n\nsys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), \\\n r\"../../../toolboxes/scripts\")))\nimport RadialLineOfSightAndRange\n\nclass RadialLineOfSightAndRangeTestCase(unittest.TestCase):\n\n def setUp(self):\n arcpy.env.overwriteOutput = True \n \n def test_toolboxMain(self):\n\n runToolMessage = \".....RadialLineOfSightAndRange.test_toolboxMain\"\n arcpy.AddMessage(runToolMessage)\n Configuration.Logger.info(runToolMessage)\n\n if arcpy.CheckExtension(\"3D\") == \"Available\":\n arcpy.CheckOutExtension(\"3D\")\n else:\n raise Exception(\"3D license is not available.\")\n\n observers = os.path.join(Configuration.militaryInputDataGDB, \"RLOS_Observers\")\n elevationSurface = os.path.join(Configuration.militaryInputDataGDB, \"ElevationUTM_Zone10\")\n\n viewshedFC = r'in_memory\\viewshed'\n donutWedgeFC = r'in_memory\\wedge'\n pieWedgeFC = r'in_memory\\fullwedge'\n\n RadialLineOfSightAndRange.createViewshed(observers, \\\n elevationSurface, \\\n '1000', '90', '180', '20', '500', \\\n viewshedFC, donutWedgeFC, pieWedgeFC)\n\n viewshedFeaturesCount = int(arcpy.GetCount_management(viewshedFC).getOutput(0))\n donutFeaturesCount = int(arcpy.GetCount_management(donutWedgeFC).getOutput(0))\n pieFeaturesCount = int(arcpy.GetCount_management(pieWedgeFC).getOutput(0))\n\n self.assertGreater(viewshedFeaturesCount, 0, \"No output features created for \" + str(viewshedFC))\n self.assertGreater(donutFeaturesCount, 0, \"No output features created for \" + str(donutWedgeFC))\n self.assertGreater(pieFeaturesCount, 0, \"No output features created for \" + str(pieWedgeFC))\n\n def test_surfaceContainsPoint(self):\n '''\n Check if elevation dataset contains the specified point\n '''\n runToolMessage = \".....RadialLineOfSightAndRange.test_surfaceContainsPoint\"\n arcpy.AddMessage(runToolMessage)\n Configuration.Logger.info(runToolMessage)\n\n observers = os.path.join(Configuration.militaryInputDataGDB, \"RLOS_Observers\")\n\n elevationSurface = os.path.join(Configuration.militaryInputDataGDB, \"ElevationUTM_Zone10\")\n\n pointsIn = RadialLineOfSightAndRange.surfaceContainsPoints(observers, elevationSurface)\n\n self.assertTrue(pointsIn, 
'Points not within Surface as Expected')\n\n def test_surfaceContainsPointWgs84(self): \n '''\n Check if elevation dataset contains the specified point not in same SR as surface\n '''\n runToolMessage = \".....RadialLineOfSightAndRange.test_surfaceContainsPointWgs84\"\n arcpy.AddMessage(runToolMessage)\n Configuration.Logger.info(runToolMessage)\n\n # List of coordinates\n coordinates = [[-121.5, 36.5], [-121.2, 36.1]]\n\n # Create an in_memory feature class to contain the coordinate pairs\n observerFeatureClass = arcpy.CreateFeatureclass_management(\n \"in_memory\", \"tempfc\", \"POINT\", spatial_reference=arcpy.SpatialReference(4326))[0]\n\n # Open an insert cursor\n with arcpy.da.InsertCursor(observerFeatureClass, [\"SHAPE@\"]) as cursor:\n # Iterate through list of coordinates and add to cursor\n for (x, y) in coordinates:\n point = arcpy.Point(x, y)\n pointGeo = arcpy.PointGeometry(point, \\\n arcpy.SpatialReference(4326))\n cursor.insertRow([pointGeo])\n\n elevationSurface = os.path.join(Configuration.militaryInputDataGDB, \"ElevationUTM_Zone10\")\n\n arePointsIn = RadialLineOfSightAndRange.surfaceContainsPoints(observerFeatureClass, elevationSurface)\n\n self.assertTrue(arePointsIn, 'Points not within Surface as Expected')\n","sub_path":"utils/test/visibility_tests/RadialLineOfSightAndRangeTestCase.py","file_name":"RadialLineOfSightAndRangeTestCase.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"84177324","text":"import json\nimport sqlalchemy as sa\nimport pandas as pd #pandas is overkill, but it makes the database work really really easy, and that's nice\nimport string\nfrom datetime import datetime\nimport os, argparse\n\nfrom web import models, db\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--notebook'\n ,action='store_true'\n ,help='Set this flag to output to a Jupyter Notebook instead of a .py file'\n )\nNOTEBOOK = parser.parse_args().notebook\n\n\ntry:\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\nexcept:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.abspath(os.path.join(basedir, os.pardir, 'ARTBot.db'))\nSQL_ENGINE = sa.create_engine(SQLALCHEMY_DATABASE_URI)\n\n\n# Lists slots that should typically be available\ndef canvas_slot_generator():\n for slot in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n yield str(slot)\nget_canvas_slot = canvas_slot_generator()\n\ndef well_map(well):\n map = dict(zip(range(26), string.ascii_uppercase))\n letter = map[well[0]]\n number = well[1] + 1\n return letter + str(number)\n\n\ndef add_canvas_locations(template_string, artpieces):\n # write where canvas plates are to be placed into code\n canvas_locations = dict(zip(artpieces.title, get_canvas_slot))\n procedure = template_string.replace('%%CANVAS LOCATIONS GO HERE%%', str(canvas_locations))\n\n return procedure, canvas_locations\n\n\ndef add_pixel_locations(template_string, artpieces):\n # write where to draw pixels on each plate into code. Listed by color to reduce contamination\n pixels_by_color = dict()\n for index, artpiece in artpieces.iterrows():\n for color in artpiece.art:\n if color not in pixels_by_color:\n pixels_by_color[color] = dict()\n pixels_by_color[color][artpiece.title] = [well_map(pixel) for pixel in artpiece.art[color]]\n procedure = template_string.replace('%%PIXELS GO HERE%%', str(pixels_by_color))\n\n return procedure\n\n\nnum_pieces = 0\nwhile num_pieces not in range(1,10):\n try:\n num_pieces = int(input(\"How much art? (1-9)\"))\n except:\n num_pieces = 0\nquery = f\"\"\"SELECT * FROM artpieces\n WHERE status = 'Submitted'\n ORDER BY submit_date ASC\n LIMIT {num_pieces}\n \"\"\"\n\nartpieces = pd.read_sql(query, SQL_ENGINE, parse_dates = ['submit_date'])\nartpieces['art'] = artpieces.art.apply(json.loads)\n\n\nif not len(artpieces):\n print('No new art found. 
All done.')\n\nelse:\n print(f'Loaded {len(artpieces)} pieces of art')\n print(artpieces[['id','title','email','submit_date']])\n\n\n #Get Python art procedure template\n file_extension = 'ipynb' if NOTEBOOK == True else 'py' #Use Jupyter notbook template or .py template\n template_file = open(os.path.join(basedir,f'ART_TEMPLATE.{file_extension}'))\n template_string = template_file.read()\n template_file.close()\n\n\n procedure, canvas_locations = add_canvas_locations(template_string, artpieces)\n\n procedure = add_pixel_locations(procedure, artpieces)\n\n\n now = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n unique_file_name = f'ARTISTIC_PROCEDURE_{now}.{file_extension}'\n output_file = open(os.path.join(basedir,'procedures',unique_file_name),'w')\n output_file.write(procedure)\n output_file.close()\n\n updated_records = models.artpieces.query.filter(models.artpieces.id.in_(artpieces.id))\n for record in updated_records:\n record.status = 'Processed'\n db.session.commit()\n\n print(f'Successfully generated artistic procedure into: ARTBot/robot/procedures/{unique_file_name}')\n print('The following slots will be used:')\n print('\\n'.join([f'Slot {str(canvas_locations[key])}: \"{key}\"' for key in canvas_locations]))","sub_path":"robot/art_processor.py","file_name":"art_processor.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"368608284","text":"import json\nimport re\nimport threading\nimport time\nimport traceback\n\nimport logging\nimport websocket\n\nfrom own_adapter.agent import Agent\nfrom own_adapter.board import Board\nfrom own_adapter.element import Element\nfrom own_adapter.platform_access import PlatformAccess\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nfrom config import SETTINGS as S\n\n\nAGENT_LOGIN = S['own_space']['login']\nAGENT_PASSWORD = S['own_space']['password']\n\n\ndef __do_something(element):\n \"\"\"Write your code here\"\"\"\n\n # examples:\n # put a message to a board\n message = 'Hello world!'\n element.get_board().put_message(message)\n\n # put a URL to an element\n url = 'https://www.own.space/'\n element.put_link(url)\n\n\ndef __run_on_element(element):\n \"\"\"Running on a target element\"\"\"\n try:\n __do_something(element)\n except Exception as ex:\n logger.exception('helloworld', 'Error: could not process an element. Element id: {}. Exception message: {}.\\n'\n '{}'.format(element.get_id(), str(ex), traceback.format_exc()))\n\n\ndef __run_on_board(board):\n \"\"\"Runs the agent on elements of a board\"\"\"\n elements = board.get_elements()\n for element in elements:\n __run_on_element(element)\n\n\ndef periodical_update():\n \"\"\"Does periodical work with a predefined time interval\"\"\"\n time_interval = 3\n\n while True:\n time.sleep(time_interval)\n\n agent = get_agent()\n boards = agent.get_boards()\n for board in boards:\n __run_on_board(board)\n logger.info('Daily news update is done.')\n\n\ndef get_agent():\n \"\"\"Returns the current agent\"\"\"\n login = AGENT_LOGIN\n password = AGENT_PASSWORD\n\n platform_access = PlatformAccess(login, password)\n helloworld_agent = Agent(platform_access)\n\n return helloworld_agent\n\n\ndef on_websocket_message(ws, message):\n \"\"\"Processes websocket messages\"\"\"\n message_dict = json.loads(message)\n content_type = message_dict['contentType']\n message_type = content_type.replace('application/vnd.uberblik.', '')\n\n logger.debug('helloworld', message)\n\n if message_type == 'liveUpdateElementCaptionEdited+json':\n element_caption = message_dict['newCaption']\n # looking for elements that target our agent\n if re.match(pattern='@helloworld:.+', string=element_caption):\n # create instances of Board and Element to work with them\n element_id = message_dict['path']\n news_agent = get_agent()\n board_id = '/'.join(element_id.split('/')[:-2])\n board = Board.get_board_by_id(board_id, news_agent.get_platform_access(), need_name=False)\n element = Element.get_element_by_id(element_id, news_agent.get_platform_access(), board)\n if element is not None:\n __run_on_element(element)\n\n\ndef on_websocket_error(ws, error):\n \"\"\"Logs websocket errors\"\"\"\n logger.error(error)\n\n\ndef on_websocket_open(ws):\n \"\"\"Logs websocket openings\"\"\"\n logger.info('Websocket is open')\n\n\ndef on_websocket_close(ws):\n \"\"\"Logs websocket closings\"\"\"\n logger.info('Websocket is closed')\n\n\ndef open_websocket():\n \"\"\"Opens a websocket to receive messages from the boards about events\"\"\"\n agent = get_agent()\n # getting the service url without protocol name\n platform_url_no_protocol = agent.get_platform_access().get_platform_url().split('://')[1]\n access_token = agent.get_platform_access().get_access_token()\n url = 'ws://{}/opensocket?token={}'.format(platform_url_no_protocol, access_token)\n\n ws = websocket.WebSocketApp(url,\n on_message=on_websocket_message,\n 
on_error=on_websocket_error,\n on_open=on_websocket_open,\n on_close=on_websocket_close)\n ws.run_forever()\n\n\ndef run():\n websocket_thread = None\n updater_thread = None\n\n while True:\n # opening a websocket for catching server messages\n if websocket_thread is None or not websocket_thread.is_alive():\n try:\n websocket_thread = threading.Thread(target=open_websocket)\n websocket_thread.start()\n except Exception as e:\n logger.exception('Could not open a websocket. Exception message: {}'.format(str(e)))\n\n # periodical updates\n if updater_thread is None or not updater_thread.is_alive():\n try:\n updater_thread = threading.Thread(target=periodical_update)\n updater_thread.start()\n except Exception as e:\n logger.exception('Could not start updater. Exception message: {}'.format(str(e)))\n\n # wait until next check\n time.sleep(10)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"agents_demo/hello_world_agent.py","file_name":"hello_world_agent.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"474363262","text":"# Imports\nimport numpy as np \nimport cv2\nimport argparse\n\n# argparse ArgumentParser instance\nap = argparse.ArgumentParser()\n\nap.add_argument('-i', '--image', required = True,\n\thelp = \"insert image file_path here\")\n\nargs = vars(ap.parse_args())\n\n# import image\nimage = cv2.imread(args['image'])\ncv2.imshow(\"input image\", image)\n#cv2.waitKey(0)\n\n# let's create a mask that will crop out the base of the grand canyon valley\n# first we want to find out where everything is.\nh,w = image.shape[:2]\n\n# create quadrants function that returns cartesian quadrants\ndef quadrants(image):\n\th,w = image.shape[:2]\n\t\n\tfirst_quadrant = image[0:h/2, w/2:w]\n\tsecond_quadrant = image[0:h/2, 0:w/2]\n\tthird_quadrant = image[h/2:h, 0:w/2]\n\tfourth_quadrant = image[h/2:h, w/2:w]\n\n\treturn first_quadrant, second_quadrant, third_quadrant, fourth_quadrant\n\ndef show_quadrants(image):\n\n\tfirst_quadrant, second_quadrant, third_quadrant, fourth_quadrant = quadrants(image)\n\n\tcv2.imshow('first_quadrant', first_quadrant)\n\tcv2.imshow('second_quadrant', second_quadrant)\n\tcv2.imshow('third_quadrant', third_quadrant)\n\tcv2.imshow('fourth_quadrant', fourth_quadrant)\n\tcv2.waitKey(0)\n\n#show_quadrants(image)\n\n# ok so we discovered that the cloud we want is in the top right of the first quadrant\n# about 2w/3:w and 0:h/2 \n\n# create mask\nmask = np.zeros(image.shape[:2], dtype = np.uint8)\n# draw rectangle over the mask in top right corner that we want to crop\n# first we need to figure out our starting and ending corners\ntop_left = (int(w*2/3), 0)\nbottom_right = (w, int(h/4))\ncv2.rectangle(mask, top_left, bottom_right, 255, -1)\ncv2.imshow(\"mask\", mask)\n\n# use bitwise_and on the image to create a clouds image object from the mask\nclouds = cv2.bitwise_and(image, image, mask = mask)\ncv2.imshow(\"clouds!\", clouds)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\n","sub_path":"1.4.8_Masks.py","file_name":"1.4.8_Masks.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"6081534","text":"import random\n\n\nLOCATIONS = [\n \"hall\",\n \"kitchen\",\n \"bedroom\",\n \"bathroom\",\n \"balcony\"\n]\n\nLOCATION_TARGETS = {\n \"hall\": [\"lights\", \"music\", \"fan\", \"tv\"],\n \"kitchen\": [\"lights\", \"fan\"],\n \"bedroom\": [\"lights\", \"music\", \"fan\", \"tv\"],\n \"bathroom\": [\"lights\", \"music\"],\n \"balcony\": [\"lights\"],\n}\n\nTARGETS = [\n \"lights\",\n \"music\",\n \"fan\",\n \"tv\",\n]\n\nTARGET_OPS = {\n \"lights\": [\"on\", \"off\"],\n \"music\": [\"on\", \"off\", \"play\"],\n \"fan\": [\"on\", \"off\", \"play\"],\n \"tv\": [\"on\", \"off\", \"play\"],\n}\n\nGREETINGS = [\n \"good morning\",\n \"good evening\",\n \"good afternoon\",\n \"hello\",\n \"hey\",\n]\n\nGREETING_RESPONSES = [\n \"how can I help you\",\n \"what can I do for you\",\n \"what can I help you with\",\n]\n\nEMPTY_RESPONSES = [\n \"which ones?\",\n \"where exactly?\",\n]\n\ndef _greeting_dialog():\n greeting = GREETINGS[random.randint(0, len(GREETINGS) - 1)]\n response = GREETING_RESPONSES[random.randint(0, len(GREETING_RESPONSES) - 1)]\n spoken_dialog = \"%s\\t%s, %s\" % (\n greeting,\n greeting,\n response\n )\n return spoken_dialog\n\n\ndef _query_target_location_dialog(entities):\n entities['target'] = TARGETS[random.randint(0, len(TARGETS) - 1)]\n operations = TARGET_OPS[entities['target']]\n entities['action'] = operations[random.randint(0, len(operations) - 1)]\n response = EMPTY_RESPONSES[random.randint(0, len(EMPTY_RESPONSES) - 1)]\n spoken_dialog = \"%s the %s\\t%s\" % (\n entities['action'],\n entities['target'],\n response\n )\n if entities['action'] in ['on', 'off']:\n spoken_dialog = \"turn \" + spoken_dialog\n return spoken_dialog, entities\n\n\ndef _query_location_confirm_dialog(entities):\n subset = [l for l in LOCATIONS if entities['target'] in LOCATION_TARGETS[l]]\n entities['location'] = subset[random.randint(0, len(subset) - 1)]\n response = \"api_call {target} {action} {location}\".format(**entities)\n spoken_dialog = \"In the %s\\t%s\" % (\n entities['location'],\n response\n )\n return spoken_dialog, entities\n\n\ndef three_state_dialog_gen(count=50):\n text = \"\"\n for dialog in range(count):\n entities = {\"target\": None, \"location\": None, \"action\": None}\n for exchange in range(1, 4):\n if exchange == 1:\n spoken_dialog = _greeting_dialog()\n elif exchange == 2:\n spoken_dialog, entities = _query_target_location_dialog(entities)\n else:\n spoken_dialog, entities = _query_location_confirm_dialog(entities)\n text += \"%s %s\\n\" % (exchange, spoken_dialog)\n text += '\\n'\n return text\n\n\ndef _single_state_dialog(entities):\n entities['target'] = TARGETS[random.randint(0, len(TARGETS) - 1)]\n operations = TARGET_OPS[entities['target']]\n entities['action'] = operations[random.randint(0, len(operations) - 1)]\n loc_subset = [loc for loc in LOCATIONS if entities['target'] in LOCATION_TARGETS[loc]]\n entities['location'] = loc_subset[random.randint(0, len(loc_subset) - 1)]\n response = \"api_call {target} {action} {location}\".format(**entities)\n spoken_dialog = \"%s the %s in the %s\\t%s\" % (\n entities['action'],\n entities['target'],\n entities['location'],\n response\n )\n if entities['action'] in ['on', 'off']:\n spoken_dialog = \"turn \" + spoken_dialog\n return spoken_dialog, entities\n\n\ndef single_state_dialog_gen(count=25):\n text = \"\"\n for dialog in range(count):\n entities = {\"target\": None, \"location\": None, \"action\": None}\n spoken_dialog, entities = _single_state_dialog(entities)\n text += \"1 %s\\n\" % 
spoken_dialog\n    text += '\\n'\n    return text\n\n\ntext_three_states = three_state_dialog_gen(50)\ntext_single_state = single_state_dialog_gen(25)  # note: generated but never written to the output file below\nwith open(\"../data/auto-dialog.txt\", 'w') as fd:\n    fd.write(text_three_states*4)\n\n","sub_path":"project/HCN/tools/data_gen.py","file_name":"data_gen.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"58931237","text":"import os\r\nimport numpy as np\r\nimport tensorflow\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras import Model\r\nfrom tensorflow.keras import utils\r\nfrom tensorflow.keras import backend\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.utils import plot_model\r\nfrom retarget import Retarget\r\nfrom model_helpers import Normalize, Invert, GausBlur, WeightedAdd\r\nfrom squeeze_excite import SEBlock, SELayer\r\nfrom BAM import BAMLayer, BAMBlock\r\nfrom CBAM import CBAMLayer\r\n\r\nBASE_WEIGHTS_PATH = (\r\n 'https://github.com/keras-team/keras-applications/'\r\n 'releases/download/resnet/')\r\nWEIGHTS_HASHES = {\r\n 'resnet50': ('2cb95161c43110f7111970584f804107',\r\n '4d473c1dd8becc155b73f8504c6f6626'),\r\n 'resnet101': ('f1aeb4b969a6efcfb50fad2f0c20cfc5',\r\n '88cf7a10940856eca736dc7b7e228a21'),\r\n 'resnet152': ('100835be76be38e30d865e96f2aaae62',\r\n 'ee4c566cf9a93f14d82f913c2dc6dd0c'),\r\n 'resnet50v2': ('3ef43a0b657b3be2300d5770ece849e0',\r\n 'fac2f116257151a9d068a22e544a4917'),\r\n 'resnet101v2': ('6343647c601c52e1368623803854d971',\r\n 'c0ed64b8031c3730f411d2eb4eea35b5'),\r\n 'resnet152v2': ('a49b44d1979771252814e80f8ec446f9',\r\n 'ed17cf2e0169df9d443503ef94b23b33'),\r\n 'resnext50': ('67a5b30d522ed92f75a1f16eef299d1a',\r\n '62527c363bdd9ec598bed41947b379fc'),\r\n 'resnext101': ('34fb605428fcc7aa4d62f44404c11509',\r\n '0f678c91647380debd923963594981b3')\r\n}\r\n\r\n\r\ndef block1(x, filters, kernel_size=3, stride=1,\r\n conv_shortcut=True, name=None, att_type=''):\r\n \"\"\"A residual block.\r\n # Arguments\r\n x: input tensor.\r\n filters: integer, filters of the bottleneck layer.\r\n kernel_size: default 3, kernel size of the bottleneck layer.\r\n stride: default 1, stride of the first layer.\r\n conv_shortcut: default True, use convolution shortcut if True,\r\n otherwise identity shortcut.\r\n name: string, block label.\r\n # Returns\r\n Output tensor for the residual block.\r\n \"\"\"\r\n bn_axis = 3\r\n\r\n if conv_shortcut is True:\r\n shortcut = layers.Conv2D(4 * filters, 1, strides=stride,\r\n name=name + '_0_conv')(x)\r\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_0_bn')(shortcut)\r\n else:\r\n shortcut = x\r\n\r\n x = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_1_bn')(x)\r\n x = layers.Activation('relu', name=name + '_1_relu')(x)\r\n\r\n x = layers.Conv2D(filters, kernel_size, padding='SAME',\r\n name=name + '_2_conv')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_2_bn')(x)\r\n x = layers.Activation('relu', name=name + '_2_relu')(x)\r\n\r\n x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_3_bn')(x)\r\n if att_type == 'SE':\r\n U = layers.Conv2D(filters=int(backend.int_shape(x)[bn_axis]),\r\n kernel_size=kernel_size,\r\n strides=(1, 1),\r\n padding='same')(x)\r\n x_attention = SELayer(reduction_ratio=16)(U)\r\n x = layers.multiply([x, x_attention])\r\n elif att_type == 'CBAM':\r\n x = CBAMLayer(reduction_ratio=16, kernel_size=7)(x)\r\n\r\n x = layers.Add(name=name + '_add')([shortcut, x])\r\n return x\r\n\r\n\r\ndef stack1(x, filters, blocks, stride1=2, name=None, att_type=''):\r\n \"\"\"A set of stacked residual blocks.\r\n # Arguments\r\n x: input tensor.\r\n filters: integer, filters of the bottleneck layer in a block.\r\n blocks: 
integer, blocks in the stacked blocks.\r\n stride1: default 2, stride of the first layer in the first block.\r\n name: string, stack label.\r\n # Returns\r\n Output tensor for the stacked blocks.\r\n \"\"\"\r\n\r\n if att_type == 'BAM':\r\n if name == 'conv3' or name == 'conv4' or name == 'conv5':\r\n x_attention = BAMLayer(reduction_ratio=16, dilation_val=4)(x)\r\n x_attention = layers.Activation('sigmoid')(x_attention)\r\n x_attention = tf.math.add(1.0, x_attention)\r\n x_attention = layers.multiply([x, x_attention])\r\n x = layers.Add()([x, x_attention])\r\n\r\n x = block1(x, filters, stride=stride1, name=name + '_block1', att_type=att_type)\r\n x = layers.Activation('relu', name=name + '_block1' + '_out')(x)\r\n\r\n if att_type == 'Retarget':\r\n if name == 'conv3':\r\n x_attention1 = layers.DepthwiseConv2D(kernel_size=5,\r\n strides=(1, 1),\r\n padding='same')(x)\r\n x_attention2 = layers.Conv2D(filters=1,\r\n kernel_size=5,\r\n strides=(1, 1),\r\n padding='same')(x)\r\n x_attention = WeightedAdd()(x_attention1, x_attention2)\r\n # x_attention = layers.Activation('softmax')(x_attention)\r\n x_attention = Normalize()(x_attention)\r\n x = Retarget()([x, x_attention])\r\n if name == 'conv4':\r\n x_attention1 = layers.DepthwiseConv2D(kernel_size=5,\r\n strides=(1, 1),\r\n padding='same')(x)\r\n x_attention2 = layers.Conv2D(filters=1,\r\n kernel_size=5,\r\n strides=(1, 1),\r\n padding='same')(x)\r\n x_attention = WeightedAdd()(x_attention1, x_attention2)\r\n # x_attention = layers.Activation('softmax')(x_attention)\r\n x_attention = Normalize()(x_attention)\r\n x = Retarget()([x, x_attention])\r\n\r\n\r\n for i in range(2, blocks + 1):\r\n x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i), att_type=att_type)\r\n x = layers.Activation('relu', name=name + '_block' + str(i) + '_out')(x)\r\n\r\n return x\r\n\r\n\r\ndef block2(x, filters, kernel_size=3, stride=1,\r\n conv_shortcut=False, name=None):\r\n \"\"\"A residual block.\r\n # Arguments\r\n x: input tensor.\r\n filters: integer, filters of the bottleneck layer.\r\n kernel_size: default 3, kernel size of the bottleneck layer.\r\n stride: default 1, stride of the first layer.\r\n conv_shortcut: default False, use convolution shortcut if True,\r\n otherwise identity shortcut.\r\n name: string, block label.\r\n # Returns\r\n Output tensor for the residual block.\r\n \"\"\"\r\n bn_axis = 3\r\n\r\n preact = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_preact_bn')(x)\r\n preact = layers.Activation('relu', name=name + '_preact_relu')(preact)\r\n\r\n if conv_shortcut is True:\r\n shortcut = layers.Conv2D(4 * filters, 1, strides=stride,\r\n name=name + '_0_conv')(preact)\r\n else:\r\n shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x\r\n\r\n x = layers.Conv2D(filters, 1, strides=1, use_bias=False,\r\n name=name + '_1_conv')(preact)\r\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_1_bn')(x)\r\n x = layers.Activation('relu', name=name + '_1_relu')(x)\r\n\r\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\r\n x = layers.Conv2D(filters, kernel_size, strides=stride,\r\n use_bias=False, name=name + '_2_conv')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_2_bn')(x)\r\n x = layers.Activation('relu', name=name + '_2_relu')(x)\r\n\r\n x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)\r\n x = layers.Add(name=name + '_out')([shortcut, x])\r\n return x\r\n\r\n\r\ndef 
stack2(x, filters, blocks, stride1=2, name=None):\r\n \"\"\"A set of stacked residual blocks.\r\n # Arguments\r\n x: input tensor.\r\n filters: integer, filters of the bottleneck layer in a block.\r\n blocks: integer, blocks in the stacked blocks.\r\n stride1: default 2, stride of the first layer in the first block.\r\n name: string, stack label.\r\n # Returns\r\n Output tensor for the stacked blocks.\r\n \"\"\"\r\n x = block2(x, filters, conv_shortcut=True, name=name + '_block1')\r\n for i in range(2, blocks):\r\n x = block2(x, filters, name=name + '_block' + str(i))\r\n x = block2(x, filters, stride=stride1, name=name + '_block' + str(blocks))\r\n return x\r\n\r\n\r\ndef block3(x, filters, kernel_size=3, stride=1, groups=32,\r\n conv_shortcut=True, name=None):\r\n \"\"\"A residual block.\r\n # Arguments\r\n x: input tensor.\r\n filters: integer, filters of the bottleneck layer.\r\n kernel_size: default 3, kernel size of the bottleneck layer.\r\n stride: default 1, stride of the first layer.\r\n groups: default 32, group size for grouped convolution.\r\n conv_shortcut: default True, use convolution shortcut if True,\r\n otherwise identity shortcut.\r\n name: string, block label.\r\n # Returns\r\n Output tensor for the residual block.\r\n \"\"\"\r\n bn_axis = 3\r\n\r\n if conv_shortcut is True:\r\n shortcut = layers.Conv2D((64 // groups) * filters, 1, strides=stride,\r\n use_bias=False, name=name + '_0_conv')(x)\r\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_0_bn')(shortcut)\r\n else:\r\n shortcut = x\r\n\r\n x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_1_bn')(x)\r\n x = layers.Activation('relu', name=name + '_1_relu')(x)\r\n\r\n c = filters // groups\r\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\r\n x = layers.DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,\r\n use_bias=False, name=name + '_2_conv')(x)\r\n kernel = np.zeros((1, 1, filters * c, filters), dtype=np.float32)\r\n for i in range(filters):\r\n start = (i // c) * c * c + i % c\r\n end = start + c * c\r\n kernel[:, :, start:end:c, i] = 1.\r\n x = layers.Conv2D(filters, 1, use_bias=False, trainable=False,\r\n kernel_initializer={'class_name': 'Constant',\r\n 'config': {'value': kernel}},\r\n name=name + '_2_gconv')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_2_bn')(x)\r\n x = layers.Activation('relu', name=name + '_2_relu')(x)\r\n\r\n x = layers.Conv2D((64 // groups) * filters, 1,\r\n use_bias=False, name=name + '_3_conv')(x)\r\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n name=name + '_3_bn')(x)\r\n\r\n x = layers.Add(name=name + '_add')([shortcut, x])\r\n x = layers.Activation('relu', name=name + '_out')(x)\r\n return x\r\n\r\n\r\ndef stack3(x, filters, blocks, stride1=2, groups=32, name=None):\r\n \"\"\"A set of stacked residual blocks.\r\n # Arguments\r\n x: input tensor.\r\n filters: integer, filters of the bottleneck layer in a block.\r\n blocks: integer, blocks in the stacked blocks.\r\n stride1: default 2, stride of the first layer in the first block.\r\n groups: default 32, group size for grouped convolution.\r\n name: string, stack label.\r\n # Returns\r\n Output tensor for the stacked blocks.\r\n \"\"\"\r\n x = block3(x, filters, stride=stride1, groups=groups, name=name + '_block1')\r\n for i in range(2, blocks + 1):\r\n x = block3(x, filters, 
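# identity shortcuts (conv_shortcut=False) for the remaining blocks in this stack\r\n                   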
groups=groups, conv_shortcut=False,\r\n                   name=name + '_block' + str(i))\r\n    return x\r\n\r\n\r\ndef ResNet(stack_fn,\r\n           preact,\r\n           use_bias,\r\n           model_name='resnet',\r\n           include_top=True,\r\n           weights='imagenet',\r\n           input_tensor=None,\r\n           input_shape=None,\r\n           pooling=None,\r\n           classes=1000,\r\n           batch_size=16,\r\n           pth_hist='',\r\n           att_type='',\r\n           **kwargs):\r\n    \"\"\"Instantiates the ResNet, ResNetV2, and ResNeXt architecture.\r\n    Optionally loads weights pre-trained on ImageNet.\r\n    Note that the data format convention used by the model is\r\n    the one specified in your Keras config at `~/.keras/keras.json`.\r\n    # Arguments\r\n        stack_fn: a function that returns output tensor for the\r\n            stacked residual blocks.\r\n        preact: whether to use pre-activation or not\r\n            (True for ResNetV2, False for ResNet and ResNeXt).\r\n        use_bias: whether to use biases for convolutional layers or not\r\n            (True for ResNet and ResNetV2, False for ResNeXt).\r\n        model_name: string, model name.\r\n        include_top: whether to include the fully-connected\r\n            layer at the top of the network.\r\n        weights: one of `None` (random initialization),\r\n              'imagenet' (pre-training on ImageNet),\r\n              or the path to the weights file to be loaded.\r\n        input_tensor: optional Keras tensor\r\n            (i.e. output of `layers.Input()`)\r\n            to use as image input for the model.\r\n        input_shape: optional shape tuple, only to be specified\r\n            if `include_top` is False (otherwise the input shape\r\n            has to be `(224, 224, 3)` (with `channels_last` data format)\r\n            or `(3, 224, 224)` (with `channels_first` data format).\r\n            It should have exactly 3 input channels.\r\n        pooling: optional pooling mode for feature extraction\r\n            when `include_top` is `False`.\r\n            - `None` means that the output of the model will be\r\n                the 4D tensor output of the\r\n                last convolutional layer.\r\n            - `avg` means that global average pooling\r\n                will be applied to the output of the\r\n                last convolutional layer, and thus\r\n                the output of the model will be a 2D tensor.\r\n            - `max` means that global max pooling will\r\n                be applied.\r\n        classes: optional number of classes to classify images\r\n            into, only to be specified if `include_top` is True, and\r\n            if no `weights` argument is specified.\r\n    # Returns\r\n        A Keras model instance.\r\n    # Raises\r\n        ValueError: in case of invalid argument for `weights`,\r\n            or invalid input shape.\r\n    \"\"\"\r\n    if not (weights in {'imagenet', None} or os.path.exists(weights)):\r\n        raise ValueError('The `weights` argument should be either '\r\n                         '`None` (random initialization), `imagenet` '\r\n                         '(pre-training on ImageNet), '\r\n                         'or the path to the weights file to be loaded.')\r\n\r\n    if weights == 'imagenet' and include_top and classes != 1000:\r\n        raise ValueError('If using `weights` as `\"imagenet\"` with `include_top`'\r\n                         ' as true, `classes` should be 1000')\r\n    if att_type not in ['', 'baseline', 'SE', 'BAM', 'CBAM', 'Retarget']:  # '' covers builders that do not use attention\r\n        raise ValueError('Custom Attention Module of required type is required to train '\r\n                         'custom models')\r\n    if input_shape != (224,224,3):\r\n        raise ValueError('Image dimensions need to be of the size 224 x 224')\r\n\r\n    # Determine proper input shape\r\n\r\n    img_input = layers.Input(shape=input_shape, batch_size=batch_size)\r\n    bn_axis = 3\r\n\r\n    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)\r\n    x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)\r\n\r\n    if preact is False:\r\n        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n                                      
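# axis=bn_axis (=3) assumes channels_last inputs\r\n                                      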
name='conv1_bn')(x)\r\n        x = layers.Activation('relu', name='conv1_relu')(x)\r\n\r\n    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)\r\n    x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)\r\n\r\n\r\n    x = stack_fn(x, att_type=att_type)\r\n\r\n    if preact is True:\r\n        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\r\n                                      name='post_bn')(x)\r\n        x = layers.Activation('relu', name='post_relu')(x)\r\n\r\n    if include_top:\r\n        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\r\n        x = layers.Dense(classes, activation='softmax', name='probs')(x)\r\n    else:\r\n        if pooling == 'avg':\r\n            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\r\n        elif pooling == 'max':\r\n            x = layers.GlobalMaxPooling2D(name='max_pool')(x)\r\n        x = layers.Dense(classes, activation='softmax')(x)\r\n\r\n    inputs = img_input\r\n\r\n    # Create model.\r\n    model = Model(inputs, x, name=model_name)\r\n\r\n    # Load weights.\r\n    if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):\r\n        if include_top:\r\n            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'\r\n            file_hash = WEIGHTS_HASHES[model_name][0]\r\n        else:\r\n            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'\r\n            file_hash = WEIGHTS_HASHES[model_name][1]\r\n        weights_path = utils.get_file(file_name,\r\n                                      BASE_WEIGHTS_PATH + file_name,\r\n                                      cache_subdir='models',\r\n                                      file_hash=file_hash)\r\n        by_name = True\r\n        model.load_weights(weights_path, by_name=by_name)\r\n    elif weights is not None:\r\n        model.load_weights(weights, by_name=True)\r\n    if pth_hist:\r\n        plot_model(model, to_file=os.path.join(pth_hist, 'model.png'), dpi=300)\r\n\r\n    return model\r\n\r\n\r\ndef ResNet50(include_top=True,\r\n             weights='imagenet',\r\n             input_tensor=None,\r\n             input_shape=None,\r\n             pooling=None,\r\n             classes=1000,\r\n             batch_size=16,\r\n             pth_hist=None,\r\n             **kwargs):\r\n    def stack_fn(x, att_type=''):\r\n        x = stack1(x, 64, 3, stride1=1, name='conv2', att_type=att_type)\r\n        x = stack1(x, 128, 4, name='conv3', att_type=att_type)\r\n        x = stack1(x, 256, 6, name='conv4', att_type=att_type)\r\n        x = stack1(x, 512, 3, name='conv5', att_type=att_type)\r\n        return x\r\n    return ResNet(stack_fn, False, True, 'resnet50',\r\n                  include_top, weights,\r\n                  input_tensor, input_shape,\r\n                  pooling, classes, batch_size, pth_hist,\r\n                  **kwargs)\r\n\r\n\r\ndef ResNet101(include_top=True,\r\n              weights='imagenet',\r\n              input_tensor=None,\r\n              input_shape=None,\r\n              pooling=None,\r\n              classes=1000,\r\n              batch_size=16,\r\n              pth_hist=None,\r\n              **kwargs):\r\n    def stack_fn(x, att_type=''):\r\n        x = stack1(x, 64, 3, stride1=1, name='conv2', att_type=att_type)\r\n        x = stack1(x, 128, 4, name='conv3', att_type=att_type)\r\n        x = stack1(x, 256, 23, name='conv4', att_type=att_type)\r\n        x = stack1(x, 512, 3, name='conv5', att_type=att_type)\r\n        return x\r\n    return ResNet(stack_fn, False, True, 'resnet101',\r\n                  include_top, weights,\r\n                  input_tensor, input_shape,\r\n                  pooling, classes, batch_size, pth_hist,\r\n                  **kwargs)\r\n\r\n\r\ndef ResNet152(include_top=True,\r\n              weights='imagenet',\r\n              input_tensor=None,\r\n              input_shape=None,\r\n              pooling=None,\r\n              classes=1000,\r\n              batch_size=16,\r\n              pth_hist=None,\r\n              **kwargs):\r\n    def stack_fn(x, att_type=''):\r\n        x = stack1(x, 64, 3, stride1=1, name='conv2', att_type=att_type)\r\n        x = stack1(x, 128, 8, name='conv3', att_type=att_type)\r\n        x = stack1(x, 256, 36, name='conv4', att_type=att_type)\r\n        x = stack1(x, 512, 3, name='conv5', att_type=att_type)\r\n        return x\r\n    return ResNet(stack_fn, False, True, 
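# positional flags here: preact=False, use_bias=True\r\n                  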
'resnet152',\r\n                  include_top, weights,\r\n                  input_tensor, input_shape,\r\n                  pooling, classes, batch_size, pth_hist,\r\n                  **kwargs)\r\n\r\n\r\ndef ResNet50V2(include_top=True,\r\n               weights='imagenet',\r\n               input_tensor=None,\r\n               input_shape=None,\r\n               pooling=None,\r\n               classes=1000,\r\n               batch_size=16,\r\n               pth_hist=None,\r\n               **kwargs):\r\n    def stack_fn(x, att_type=''):  # att_type is accepted (and ignored) so ResNet() can call every stack_fn uniformly\r\n        x = stack2(x, 64, 3, name='conv2')\r\n        x = stack2(x, 128, 4, name='conv3')\r\n        x = stack2(x, 256, 6, name='conv4')\r\n        x = stack2(x, 512, 3, stride1=1, name='conv5')\r\n        return x\r\n    return ResNet(stack_fn, True, True, 'resnet50v2',\r\n                  include_top, weights,\r\n                  input_tensor, input_shape,\r\n                  pooling, classes, batch_size, pth_hist,\r\n                  **kwargs)\r\n\r\n\r\ndef ResNet101V2(include_top=True,\r\n                weights='imagenet',\r\n                input_tensor=None,\r\n                input_shape=None,\r\n                pooling=None,\r\n                classes=1000,\r\n                batch_size=16,\r\n                pth_hist=None,\r\n                **kwargs):\r\n    def stack_fn(x, att_type=''):\r\n        x = stack2(x, 64, 3, name='conv2')\r\n        x = stack2(x, 128, 4, name='conv3')\r\n        x = stack2(x, 256, 23, name='conv4')\r\n        x = stack2(x, 512, 3, stride1=1, name='conv5')\r\n        return x\r\n    return ResNet(stack_fn, True, True, 'resnet101v2',\r\n                  include_top, weights,\r\n                  input_tensor, input_shape,\r\n                  pooling, classes, batch_size, pth_hist,\r\n                  **kwargs)\r\n\r\n\r\ndef ResNet152V2(include_top=True,\r\n                weights='imagenet',\r\n                input_tensor=None,\r\n                input_shape=None,\r\n                pooling=None,\r\n                classes=1000,\r\n                batch_size=16,\r\n                pth_hist=None,\r\n                **kwargs):\r\n    def stack_fn(x, att_type=''):\r\n        x = stack2(x, 64, 3, name='conv2')\r\n        x = stack2(x, 128, 8, name='conv3')\r\n        x = stack2(x, 256, 36, name='conv4')\r\n        x = stack2(x, 512, 3, stride1=1, name='conv5')\r\n        return x\r\n    return ResNet(stack_fn, True, True, 'resnet152v2',\r\n                  include_top, weights,\r\n                  input_tensor, input_shape,\r\n                  pooling, classes, batch_size, pth_hist,\r\n                  **kwargs)\r\n\r\n\r\ndef ResNeXt50(include_top=True,\r\n              weights='imagenet',\r\n              input_tensor=None,\r\n              input_shape=None,\r\n              pooling=None,\r\n              classes=1000,\r\n              batch_size=16,\r\n              pth_hist=None,\r\n              **kwargs):\r\n    def stack_fn(x, att_type=''):\r\n        x = stack3(x, 128, 3, stride1=1, name='conv2')\r\n        x = stack3(x, 256, 4, name='conv3')\r\n        x = stack3(x, 512, 6, name='conv4')\r\n        x = stack3(x, 1024, 3, name='conv5')\r\n        return x\r\n    return ResNet(stack_fn, False, False, 'resnext50',\r\n                  include_top, weights,\r\n                  input_tensor, input_shape,\r\n                  pooling, classes, batch_size, pth_hist,\r\n                  **kwargs)\r\n\r\n\r\ndef ResNeXt101(include_top=True,\r\n               weights='imagenet',\r\n               input_tensor=None,\r\n               input_shape=None,\r\n               pooling=None,\r\n               classes=1000,\r\n               batch_size=16,\r\n               pth_hist=None,\r\n               **kwargs):\r\n    def stack_fn(x, att_type=''):\r\n        x = stack3(x, 128, 3, stride1=1, name='conv2')\r\n        x = stack3(x, 256, 4, name='conv3')\r\n        x = stack3(x, 512, 23, name='conv4')\r\n        x = stack3(x, 1024, 3, name='conv5')\r\n        return x\r\n    return ResNet(stack_fn, False, False, 'resnext101',\r\n                  include_top, weights,\r\n                  input_tensor, input_shape,\r\n                  pooling, classes, batch_size, pth_hist,\r\n                  **kwargs)\r\n","sub_path":"models/custom_common_resnet.py","file_name":"custom_common_resnet.py","file_ext":"py","file_size_in_byte":24678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"587388094","text":"from urllib import parse\nfrom urllib.request import urlopen,Request\nimport simplejson\n\nurl = \"https://movie.douban.com/j/search_subjects\"\nd = {\n \"type\":\"tv\",\n \"tag\":\"热门\",\n \"page_limit\":10,\n \"page_start\":10\n}\nua = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36\"\n\n\nreq = Request('{}?{}'.format(url, parse.urlencode(d)),headers={\"User-agent\":ua})\n\nwith urlopen(req) as res:\n text = simplejson.loads(res.read())\n print(len(text['subjects']))\n print(text)","sub_path":"file/t1/t4.py","file_name":"t4.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"208436485","text":"# Вес\nweight = float(input())\n# Рост\ngrowth = float(input())\n# Результат\nresult = weight / (growth * 2)\n\nif result < 18.5:\n print('Недостаточный вес')\nelif result >= 18.5 and result <= 25:\n print('Нормальный')\nelif result >= 25 and result <= 30:\n print('Избыточный вес')\nelif result >= 30:\n print('Ожирение')","sub_path":"7/tasks/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"643763525","text":"class Table():\n\n def __init__(self):\n self._table_name = None\n self._table_headers = None\n self._table_content = None\n self._column_count = None\n self._sorted = False\n self._grouped = False\n self._sort_by_header = None\n self._group_by_header = None\n\n @property\n def table_content(self):\n \"\"\"Table content\"\"\"\n return self._table_content\n\n @table_content.setter\n def table_content(self, table_content):\n \"\"\"\n Sets table content.\n Makes sure that the number of data entries for each column is the same.\n Makes sure that the number of table headers is equal to the number of columns.\n \"\"\"\n self._column_count = len(table_content[0])\n for i in range(1, len(table_content)):\n if len(table_content[i]) != self._column_count:\n raise ValueError(\"The number of entries varies between the columns!\")\n if self._table_headers and len(self._table_headers) != self._column_count:\n raise ValueError(\n \"The number of table headers is not equal to the number of columns!\")\n\n self._table_content = table_content\n\n @property\n def table_name(self):\n \"\"\"Table name\"\"\"\n return self._table_name\n\n @table_name.setter\n def table_name(self, table_name):\n \"\"\"\n Sets table name to be printed together with table data.\n \"\"\"\n self._table_name = table_name\n\n @property\n def table_headers(self):\n \"\"\"Table headers (column headers)\"\"\"\n return self._table_headers\n\n @table_headers.setter\n def table_headers(self, table_headers):\n \"\"\"\n Sets table headers to be printed together with table data.\n Makes sure that the number of table headers is equal to the number of columns.\n \"\"\"\n if self._column_count and len(table_headers) != self._column_count:\n raise ValueError(\n \"The number of table headers is not equal to the number of columns!\")\n self._table_headers = table_headers\n\n def double_list_to_table(self, double_list):\n self.table_headers = double_list[0]\n self.table_content = double_list[1:]\n\n @property\n def table(self):\n \"\"\"Table headers + table content\"\"\"\n return self._table_headers + self._table_content\n\n def sort_table(self, header, descending=True):\n \"\"\"\n Sorts the table by the specified header in the specified order.\n \"\"\"\n self._sort_by_header = header\n column_index = self.table_headers.index(header)\n self._descending = descending\n\n self._table_content.sort(key=lambda t: t[column_index], reverse=not self._descending)\n self._sorted = True\n \"\"\"\n It's easy to print a sorted table. Just remember to replace\n three table_header_underscore_symbol characters above the sorted\n column with three '^^^' characters (or whatever).\n\n Table printing is to be implemented in a separate class.\n \"\"\"\n\n def group_table(self, header):\n \"\"\"\n Groups the table by the specified header in the specified order.\n \"\"\"\n # self.column_count = len(self.table_headers)\n self._group_by_header = header\n self._column_index = self.table_headers.index(header)\n\n if self._column_index == self._column_count - 1:\n self._table_content = (self._table_content[self._column_count-1] +\n self._table_content[self._column_count-1:])\n elif self._column_index > 0:\n self._table_content = (self._table_content[self._column_index] +\n self._table_content[:self._column_index] +\n self._table_content[self._column_index+1])\n\n self.sort_table(self.group_by_header, self.descending)\n self._grouped = True\n \"\"\"\n It's easy to print a grouped table. 
Just remember to skip\n duplicate entries in the sorted column, and also print a\n separator line above each data line that contains a\n nonduplicate entry (except for the first data line).\n\n Table printing is to be implemented in a separate class.\n \"\"\"\n\n\nif (__name__ == '__main__'):\n pass\n\n table_name = 'Ingested Keywords'\n table_header = ['Keyword', 'Instances', 'Groups', 'Status', 'File Name']\n table_content = [\n [ '#', 1, 0, 'OK', 'viper.obj' ],\n [ 'mtllib', 0, 1, 'OK', 'viper.obj' ],\n [ 'illum', 1, 0, 'OK', 'viper.mtl' ],\n [ 'Kd', 1, 0, 'OK', 'viper.mtl' ],\n [ 'Ka', 1, 0, 'OK', 'viper.mtl' ],\n [ 'Tf', 1, 0, 'OK', 'viper.mtl' ],\n [ 'map_Kd', 1, 0, 'OK', 'viper.mtl' ],\n [ 'v', 485, 0, 'OK', 'viper.obj' ],\n [ 'f', 950, 0, 'OK', 'viper.obj' ]\n ]\n\n table = Table()\n table.table_content = table_content\n table.table_name = table_name\n table.table_headers = table_header\n\n print(table.table_name)\n print(\"\")\n print(table.table_headers)\n for row in table.table_content:\n print(row)\n\n #table.format_table()\n #table.print_table()\n","sub_path":"BasicTable/Table.py","file_name":"Table.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"240841916","text":"import logging\nimport time\n\nfrom cumulusci.robotframework.utils import selenium_retry\n\nfrom robot.libraries.BuiltIn import BuiltIn\nfrom selenium.webdriver.common.keys import Keys\nfrom locator import eda_lex_locators\n\n\n@selenium_retry\nclass EDA(object):\n\n ROBOT_LIBRARY_SCOPE = \"GLOBAL\"\n ROBOT_LIBRARY_VERSION = 1.0\n\n def __init__(self, debug=False):\n self.debug = debug\n self.current_page = None\n self._session_records = []\n # Turn off info logging of all http requests\n logging.getLogger(\"requests.packages.urllib3.connectionpool\").setLevel(\n logging.WARN\n )\n\n @property\n def builtin(self):\n return BuiltIn()\n\n @property\n def cumulusci(self):\n return self.builtin.get_library_instance(\"cumulusci.robotframework.CumulusCI\")\n\n @property\n def selenium(self):\n return self.builtin.get_library_instance(\"SeleniumLibrary\")\n\n def populate_address(self, loc, value):\n \"\"\" Populate address with Place Holder aka Mailing Street etc as a locator\n and actual value of the place holder.\n \"\"\"\n xpath = eda_lex_locators[\"mailing_address\"].format(loc)\n field = self.selenium.get_webelement(xpath)\n field.send_keys(value)\n\n def click_record_button(self, title):\n \"\"\" Pass title of the button to click the buttons on the records edit page. Most common buttons are: save and cancel.\n \"\"\"\n locator = eda_lex_locators[\"record\"][\"button\"].format(title)\n self.selenium.set_focus_to_element(locator)\n button = self.selenium.get_webelement(locator)\n button.click()\n time.sleep(5)\n\n def select_tab(self, title):\n \"\"\" Switch between different tabs on a record page like Related, Details, News, Activity and Chatter\n Pass title of the tab\n \"\"\"\n locator = eda_lex_locators[\"tab\"].format(title)\n self.selenium.wait_until_page_contains_element(locator, timeout=60,\n error=\"'\" + title + \"' list header is not available on the page\")\n self.selenium.set_focus_to_element(locator)\n self.selenium.get_webelement(locator).click()\n time.sleep(5)\n\n def click_special_related_list_button(self, heading, button_title):\n \"\"\" To Click on a related list button which would open up a new lightning page rather than a modal.\n Pass the list name and button name\n \"\"\"\n locator = eda_lex_locators[\"record\"][\"related\"][\"button\"].format(\n heading, button_title\n )\n self.selenium.set_focus_to_element(locator)\n self.selenium.get_webelement(locator).click()\n\n def click_dropdown(self, title):\n locator = eda_lex_locators[\"record\"][\"list\"].format(title)\n self.selenium.set_focus_to_element(locator)\n self.selenium.get_webelement(locator).click()\n\n def pick_date(self, value):\n \"\"\"To pick a date from the date picker\"\"\"\n locator = eda_lex_locators[\"record\"][\"datepicker\"].format(value)\n self.selenium.set_focus_to_element(locator)\n self.selenium.get_webelement(locator).click()\n\n def select_row(self, value):\n \"\"\"To select a row on object page based on name and open the dropdown\"\"\"\n drop_down = eda_lex_locators[\"locating_delete_dropdown\"].format(value)\n self.selenium.get_webelement(drop_down).click()\n # self.selenium.get_webelement(drop_down).click()\n\n def select_related_row(self, value):\n \"\"\"To select row from a related list based on name and open the dropdown\"\"\"\n locators = eda_lex_locators[\"related_name\"]\n list_ele = self.selenium.get_webelements(locators)\n index = 1\n for locator in list_ele:\n if locator.text != value:\n index = index + 1\n else:\n drop_down = 
eda_lex_locators[\"rel_loc_dd\"].format(index)\n self.selenium.get_webelement(drop_down).click()\n self.selenium.get_webelement(drop_down).click()\n\n def delete_icon(self, field_name, value):\n \"\"\"To click on x \"\"\"\n locator = eda_lex_locators[\"delete_icon\"].format(field_name, value)\n self.selenium.get_webelement(locator).click()\n\n def click_edit_button(self, title):\n locator = eda_lex_locators[\"record\"][\"edit_button\"].format(title)\n self.selenium.get_webelement(locator).click()\n\n def click_id(self, title):\n locator = eda_lex_locators[\"aff_id\"].format(title)\n self.selenium.get_webelement(locator).click()\n\n def select_object_dropdown(self):\n locator = eda_lex_locators[\"object_dd\"]\n self.selenium.get_webelement(locator).click()\n\n def check_status(self, acc_name):\n aff_list = eda_lex_locators[\"aff_status\"].format(acc_name)\n aff_list_text = self.selenium.get_webelement(aff_list).text\n self.aff_id = eda_lex_locators[\"aff_id\"].format(acc_name)\n self.aff_id_text = self.selenium.get_webelement(self.aff_id).text\n return self.aff_id_text, aff_list_text\n\n def get_id(self):\n locator = eda_lex_locators[\"click_aff_id\"].format(self.aff_id_text)\n self.selenium.get_webelement(locator).click()\n\n def confirm_status(self, field, status):\n locator = eda_lex_locators[\"check_status\"].format(field, status)\n verify_former = self.selenium.get_webelement(locator).text\n return verify_former\n\n def verify_field_value(self, field, value):\n locator = eda_lex_locators[\"check_field\"].format(field, value)\n verify_former = self.selenium.get_webelement(locator).text\n return verify_former\n\n def verify_record(self, name):\n \"\"\" Checks for the record in the object page and returns true if found else returns false\n \"\"\"\n locator = eda_lex_locators[\"account_list\"].format(name)\n self.selenium.page_should_contain_element(locator)\n\n def select_option(self, name):\n \"\"\"selects various options in Contact>New opportunity page using name\n \"\"\"\n locator = eda_lex_locators[\"dd_options\"].format(name)\n self.selenium.get_webelement(locator).click()\n\n def verify_related_list_items(self, list_name, value):\n \"\"\"Verifies a specified related list has specified value(doesn't work if the list is in table format)\"\"\"\n locator = eda_lex_locators[\"related_list_items\"].format(list_name, value)\n self.selenium.page_should_contain_element(locator)\n\n def click_managehh_special_button(self, title):\n \"\"\"clicks on the new contact button on manage hh page\"\"\"\n locator = eda_lex_locators[\"manage_hh_page\"][\"mhh_button\"].format(title)\n self.selenium.get_webelement(locator).click()\n\n def header_field_value(self, title, value):\n \"\"\"Validates if the specified header field has specified value\"\"\"\n locator = eda_lex_locators[\"header_field_value\"].format(title, value)\n self.selenium.page_should_contain_element(locator)\n\n def verify_affiliated_contact(self, list_name, first_name, last_name, y):\n \"\"\"Validates if the affiliated contacts have the added contact details enter Y for positive case and N for negative case\"\"\"\n locator = eda_lex_locators[\"affiliated_contacts\"].format(\n list_name, first_name, last_name\n )\n if y.upper() == \"Y\":\n self.selenium.page_should_contain_element(locator)\n elif y.upper() == \"N\":\n self.selenium.page_should_not_contain_element(locator)\n\n def fill_address_form(self, **kwargs):\n \"\"\"Validates if the affiliated contacts have the added contact details enter Y for positive case and N for negative 
case\"\"\"\n for label, value in kwargs.items():\n locator = eda_lex_locators[\"manage_hh_page\"][\"address\"].format(\n label, value\n )\n if label == \"Street\":\n locator = locator + \"textarea\"\n self.selenium.get_webelement(locator).send_keys(value)\n else:\n locator = locator + \"input\"\n self.selenium.get_webelement(locator).send_keys(value)\n\n def verify_details_address(self, field, value):\n \"\"\"Validates if the details page address field has specified value\"\"\"\n locator = eda_lex_locators[\"detail_page\"][\"address\"].format(field, value)\n self.selenium.page_should_contain_element(locator)\n\n def validate_checkbox(self, name, checkbox_title):\n \"\"\"validates all 3 checkboxes for contact on manage hh page and returns locator for the checkbox thats required\"\"\"\n\n locator = eda_lex_locators[\"manage_hh_page\"][\"mhh_checkbox\"].format(\n name, \"fauxCBInformal\"\n )\n self.selenium.page_should_contain_element(locator)\n\n locator = eda_lex_locators[\"manage_hh_page\"][\"mhh_checkbox\"].format(\n name, \"fauxCBFormal\"\n )\n self.selenium.page_should_contain_element(locator)\n\n locator = eda_lex_locators[\"manage_hh_page\"][\"mhh_checkbox\"].format(\n name, \"fauxCBExName\"\n )\n self.selenium.page_should_contain_element(locator)\n\n if checkbox_title == \"Informal Greeting\":\n locator = eda_lex_locators[\"manage_hh_page\"][\"mhh_checkbox\"].format(\n name, \"fauxCBInformal\"\n )\n elif checkbox_title == \"Formal Greeting\":\n locator = eda_lex_locators[\"manage_hh_page\"][\"mhh_checkbox\"].format(\n name, \"fauxCBFormal\"\n )\n elif checkbox_title.capitalize() == \"Household Name\":\n locator = eda_lex_locators[\"manage_hh_page\"][\"mhh_checkbox\"].format(\n name, \"fauxCBExName\"\n )\n return locator\n\n def check_field_value(self, title, value):\n \"\"\"checks value of a field in details page(section without header)\"\"\"\n locator = eda_lex_locators[\"detail_page\"][\"verify_field_value\"].format(\n title, value\n )\n self.selenium.page_should_contain_element(locator)\n\n def click_managehh_button(self, title):\n \"\"\"clicks on the new contact button on manage hh page\"\"\"\n locator = eda_lex_locators[\"manage_hh_page\"][\"button\"].format(title)\n self.selenium.get_webelement(locator).click()\n\n def select_modal_checkbox(self, title):\n \"\"\"\"\"\"\n locator = eda_lex_locators[\"modal\"][\"checkbox\"].format(title)\n self.selenium.get_webelement(locator).click()\n\n def verify_occurence(self, title, value):\n \"\"\"\"\"\"\n locator = eda_lex_locators[\"record\"][\"related\"][\"check_occurence\"].format(\n title, value\n )\n self.selenium.page_should_contain_element(locator)\n\n def select_related_dropdown(self, title):\n \"\"\"\"\"\"\n locator = eda_lex_locators[\"record\"][\"related\"][\"drop-down\"].format(title)\n self.selenium.get_webelement(locator).click()\n\n def get_header_date_value(self, title):\n \"\"\"Validates if the specified header field has specified value\"\"\"\n locator = eda_lex_locators[\"header_datepicker\"].format(title)\n date = self.selenium.get_webelement(locator).text\n return date\n\n def get_main_header(self):\n header = self.selenium.get_webelement(\"//h1/span\").text\n return header\n\n def verify_contact_role(self, name, role):\n \"\"\"verifies the contact role on opportunity page\"\"\"\n locator = eda_lex_locators[\"opportunity\"][\"contact_role\"].format(name, role)\n self.selenium.page_should_contain_element(locator)\n\n def select_relatedlist(self, title):\n \"\"\"click on the related list to open it\"\"\"\n locator = 
eda_lex_locators[\"record\"][\"related\"][\"title\"].format(title)\n self.selenium.get_webelement(locator).click()\n\n def verify_contact_roles(self, **kwargs):\n \"\"\"\"\"\"\n for name, value in kwargs.items():\n locator = eda_lex_locators[\"object\"][\"contact_role\"].format(name, value)\n self.selenium.page_should_contain_element(locator)\n\n def page_contains_record(self, title):\n \"\"\"Validates if the specified record is present on the page\"\"\"\n locator = eda_lex_locators[\"object\"][\"record\"].format(title)\n self.selenium.page_should_not_contain_element(locator)\n\n def select_checkbox_in_eda_settings(self, loc_check, loc_checkbox):\n \"\"\" Selects checkbox. Does nothing if checkbox is already checked \"\"\"\n if self.check_if_element_exists(loc_check):\n return\n else:\n self.selenium.click_button(\"Edit\")\n self.selenium.get_webelement(loc_checkbox).click()\n self.selenium.click_button(\"Save\")\n self.selenium.wait_until_element_is_visible(loc_check)\n return\n\n def check_if_element_exists(self, xpath):\n elements = int(self.selenium.get_matching_xpath_count(xpath))\n return True if elements > 0 else False\n\n def get_eda_locator(self, path, *args, **kwargs):\n \"\"\" Returns a rendered locator string from the eda_lex_locators\n dictionary. This can be useful if you want to use an element in\n a different way than the built in keywords allow.\n \"\"\" \n locator = eda_lex_locators\n for key in path.split('.'):\n locator = locator[key]\n main_loc = locator.format(*args, **kwargs)\n return main_loc \n \n def wait_for_locator(self, path, *args, **kwargs):\n main_loc = self.get_eda_locator(path,*args, **kwargs) \n self.selenium.wait_until_element_is_visible(main_loc)\n \n def click_on_element(self,path, *args, **kwargs):\n main_loc = self.get_eda_locator(path,*args, **kwargs)\n self.selenium.click_element(main_loc)\n\n def populate_placeholder(self, loc, value):\n \"\"\" Populate placeholder element as a locator\n and actual value of the place holder.\n \"\"\"\n xpath = eda_lex_locators[\"input_placeholder\"].format(loc)\n field = self.selenium.get_webelement(xpath)\n field.send_keys(value)\n time.sleep(1)\n# if loc == (\"Search Contacts\" or \"Search Accounts\"):\n field.send_keys(Keys.ARROW_DOWN + Keys.ENTER)\n\n def edit_eda_settings_checkbox(self, checkbox_label, checkbox_toggle):\n \"\"\" Updates the checkbox_label value to checkbox_toggle in the EDA settings page \"\"\"\n locator_checkbox_default = eda_lex_locators[\"eda_settings\"][\"checkbox_default\"].format(checkbox_label)\n locator_checkbox = eda_lex_locators[\"eda_settings\"][\"checkbox\"].format(checkbox_label)\n locator_edit = eda_lex_locators[\"eda_settings\"][\"edit\"]\n locator_save = eda_lex_locators[\"eda_settings\"][\"save\"]\n\n checkbox_default = self.selenium.get_element_attribute(locator_checkbox_default, \"alt\")\n if checkbox_default == checkbox_toggle:\n return\n else:\n self.selenium.click_element(locator_edit)\n self.selenium.wait_until_page_contains_element(locator_checkbox,\n error=\"Checkbox not found on the page\")\n self.selenium.click_element(locator_checkbox)\n self.selenium.click_element(locator_save)\n self.verify_toast_message(\"Settings Saved Successfully.\")\n\n def verify_toast_message(self, value):\n \"\"\" Verifies the toast message \"\"\"\n locator = eda_lex_locators[\"toast_message\"].format(value)\n self.selenium.wait_until_page_contains_element(locator)\n\n def close_toast_message(self):\n \"\"\" Close the toast message banner \"\"\"\n locator = 
eda_lex_locators[\"toast_close\"]\n try:\n self.selenium.click_element(locator)\n except Exception:\n return\n\n def get_eda_namespace_prefix(self):\n \"\"\" Returns the EDA namespace value if the target org is a managed org else returns blank value \"\"\"\n if not hasattr(self.cumulusci, '_describe_result'):\n self.cumulusci._describe_result = self.cumulusci.sf.describe()\n objects = self.cumulusci._describe_result['sobjects']\n level_object = [o for o in objects if o['label'] == 'Program Plan'][0]\n return self.get_namespace_prefix(level_object['name'])\n\n def get_namespace_prefix(self, name):\n \"\"\"\" This is a helper function to capture the EDA namespace prefix of the target org \"\"\"\n parts = name.split('__')\n if parts[-1] == 'c':\n parts = parts[:-1]\n if len(parts) > 1:\n return parts[0] + '__'\n else:\n return ''\n","sub_path":"robot/EDA/resources/EDA.py","file_name":"EDA.py","file_ext":"py","file_size_in_byte":16540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"179252838","text":"from django.db.models import Q\nfrom django.http import HttpRequest\n\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\n\nfrom gf_api.models import Festival, Location, Day\n\n\nclass LocationResource(ModelResource):\n class Meta:\n queryset = Location.objects.filter(lat__isnull=False).filter(lng__isnull=False)\n resource_name = 'location'\n filtering = {\n \"lat\": ('exact'),\n \"lng\": ('exact'),\n }\n\nclass LatLngResource(ModelResource):\n class Meta:\n queryset = Location.objects.all()\n fields = ['lat', 'lng', 'city']\n resource_name = 'latlng'\n filtering = {\n \"lat\": ('exact'),\n \"lng\": ('exact'),\n }\n\nclass FestivalResource(ModelResource):\n location = fields.ToOneField(LocationResource, 'location', full = True)\n days = fields.ToManyField('gf_api.api.resources.DayResource', 'days', null=True, full=True)\n # days = fields.ToManyField(DayResource, attribute=lambda f: Day.objects.filter(festival=f.obj), full=True, null=True)\n class Meta:\n filtering = {\n \"location\": ALL_WITH_RELATIONS,\n }\n allowed_methods = ['get']\n always_return_data = True\n resource_name = 'festival'\n # Filter badly geolocalized locations\n queryset = Festival.objects.all()\n\nclass DayResource(ModelResource):\n festival = fields.ToOneField(FestivalResource, 'festival')\n class Meta:\n queryset = Day.objects.all()\n resource_name = 'day'\n filtering = {\n \"festival\": ('exact'),\n }\n","sub_path":"gf_api/api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"438689063","text":"from Bio import SeqIO\nfrom time import sleep as sl\n\ninfile = open(\"/Users/liamcheneyy/Desktop/Mgtfi_ref/lociLocationsInRef.txt\").read().splitlines()\nreference_path = \"/Users/liamcheneyy/Desktop/ref/GCA_000006745.1.fasta\"\noutfile_path = \"/Users/liamcheneyy/Desktop/alleles/\"\n\nfor line in infile:\n col = line.split('\\t')\n locus = col[0]\n start = int(col[1]) - 1\n end = int(col[2])\n strand = col[-2]\n chr = col[-1]\n with open(outfile_path + locus + \".fasta\", 'w') as out:\n for record in SeqIO.parse(reference_path, \"fasta\"):\n if chr == record.id:\n seq = record.seq[start:end]\n\n if strand == '-':\n seq = record.seq[start:end].reverse_complement()\n\n out.write(\">\" + locus + \":1\" + '\\n')\n out.write(str(seq) + '\\n')\n print(locus, strand)\n\n\n","sub_path":"Vibrio/2019-03-07-locus_to_sequence.py","file_name":"2019-03-07-locus_to_sequence.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"315893262","text":"# pylint: disable=redefined-outer-name\n# pylint: disable=unused-argument\n# pylint: disable=unused-variable\n# pylint: disable=too-many-arguments\n\nimport os\n\nimport requests_mock\nfrom pytest_simcore.helpers.typing_env import EnvVarsDict\nfrom simcore_service_resource_usage_tracker._meta import API_VERSION\nfrom simcore_service_resource_usage_tracker.cli import app\nfrom simcore_service_resource_usage_tracker.core.settings import ApplicationSettings\nfrom typer.testing import CliRunner\n\n\ndef test_cli_help_and_version(cli_runner: CliRunner):\n result = cli_runner.invoke(app, \"--help\")\n assert result.exit_code == os.EX_OK, result.output\n\n result = cli_runner.invoke(app, \"--version\")\n assert result.exit_code == os.EX_OK, result.output\n assert result.stdout.strip() == API_VERSION\n\n\ndef test_list_settings(cli_runner: CliRunner, app_environment: EnvVarsDict):\n result = cli_runner.invoke(app, [\"settings\", \"--show-secrets\", \"--as-json\"])\n assert result.exit_code == os.EX_OK, result.output\n\n print(result.output)\n settings = ApplicationSettings.parse_raw(result.output)\n assert settings == ApplicationSettings.create_from_envs()\n\n\ndef test_evaluate_without_configuration_raises(\n cli_runner: CliRunner,\n):\n result = cli_runner.invoke(app, [\"evaluate\", \"1234\"])\n assert result.exit_code == 1, result.output\n\n\ndef test_evaluate(\n cli_runner: CliRunner,\n app_environment: EnvVarsDict,\n mocked_prometheus_with_query: requests_mock.Mocker,\n):\n result = cli_runner.invoke(app, [\"evaluate\", \"43817\"])\n assert result.exit_code == os.EX_OK, result.output\n","sub_path":"services/resource-usage-tracker/tests/unit/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"364555292","text":"import plotly.graph_objects as go\nimport pandas as pd\nimport scipy.stats as stats\n#import statsmodels.api as sm\n#import statsmodels.formula.api as smf\nfrom statsmodels.stats.multicomp import pairwise_tukeyhsd\nfrom statsmodels.stats.libqsturng import psturng\nimport numpy as np\nimport pingouin as pg\nimport math\n\n#from pingouin import pairwise_tukey\n#https://pingouin-stats.org/index.html - has mediation analysis\n\npathout='/Users/alex/AlexBadea_MyPapers/APOE22APOE33APOE44/fa_picsE3E4/'\n#####\nfile='/Users/alex/AlexBadea_MyPapers/APOE22APOE33APOE44/APOE22APOE33APOE44VolumesFA062620.xlsx'\ndf = pd.read_excel(file, sheet_name='APOE22APOE33APOE44FA')\n\n\nfileind='/Users/alex/AlexBadea_MyPapers/APOE22APOE33APOE44/my_pfa1.xlsx'\ndfind=pd.read_excel(fileind)\n\n\n#fileind='/Users/alex/AlexBadea_MyPapers/APOE22APOE33APOE44/my_pfa.xlsx'\n#dfind=pd.read_excel(fileind)\ndf_aov = dfind\ndf_aov.loc['ftest'] = [0 for c in df.columns]\ndf_aov.loc['paov'] = [0 for c in df.columns]\ndf_aov.loc['ttest'] = [0 for c in df.columns]\n\n\n\ndf_aov.to_csv(pathout+\"fa_sex_punc_pF.csv\")\ndfind.to_csv(pathout+\"dfindvol_sex_punc_pF.csv\")\n\n\ngroupstats = df.groupby(['Genotype']).agg(['mean', 'std'] )\ngroupstats.to_csv(pathout+'mean_std_vol.csv')\n\ngroupstats2 = df.groupby(['Genotype']).agg(['mean'] )\ngroupstats3 = df.groupby(['Genotype']).agg(['std'] )\ngroupstats4 = df.groupby(['Genotype']).agg(['mean', 'std'] )\n\n#print(groupstats3)\n#groupstats2.append(groupstats3,ignore_index=True)\ngroupstats2.to_csv(pathout+'fa_mean.csv')\ngroupstats3.to_csv(pathout+'fa_std.csv')\ngroupstats4.to_csv(pathout+'concat_fa_std.csv')\n\n#print (groupstats2.loc['APOE33'])\n\n\n'''\nci95_hi = []\nci95_lo = []\nprint(groupstats)\n\nfor i in groupstats.index:\n m, c, s = groupstats.loc[i]\n ci95_hi.append(m + 1.96*s/math.sqrt(c))\n ci95_lo.append(m - 1.96*s/math.sqrt(c))\n\ngroupstats['ci95_hi'] = ci95_hi\ngroupstats['ci95_lo'] = ci95_lo\nprint(groupstats)\n'''\n\n#for col in dfind.columns[8:]:\nfor col in dfind.columns[8:]:\n\n\n\n\t#if dfind[col][0] < 0.05:\n\t\tprint(col)\n\t\tfig = go.Figure()\n\n\t\t# print(df['Genotype'].isin(['APOE22','APOE33','APOE44']))\n\t\tdf_short=df[df['Genotype'].isin(['APOE22','APOE33','APOE44'])]\n \n \n\t\tt_value_g, p_value_g=stats.ttest_ind(df_short[col][df_short['Genotype'] == 'APOE33'],\n\t\t\tdf_short[col][df_short['Genotype'] == 'APOE44'])\n\n\t\t\n\n\t\tdf_aov.at['ttest',col] = t_value_g\n\t\tdf_aov.at['paov',col] = p_value_g\n\n\n\n\t\tGenotypes = ['APOE33', 'APOE44']\n\t\tcolors = [\"red\",\"green\",\"purple\"]\n\t\tfor i in range(2):\n\t\t\tgenotype = Genotypes[i]\n\t\t\tcolor = colors[i]\n\t\t\tfig.add_trace(go.Violin(x=df_short['Genotype'][df_short['Genotype'] == genotype][df_short['Sex'] == 'male'],\n\t\t\t\t\t\t\t\t\ty=df_short[col][df['Genotype'] == genotype][df_short['Sex'] == 'male'], name=genotype+' male',\n\t\t\t\t\t\t\t\t\t#legendgroup='M', scalegroup='M', #name='M',\n\t\t\t\t\t\t\t\t\tbox_visible=True, meanline_visible=True,line_color='blue'))\n\t\t\tfig.add_trace(go.Violin(x=df_short['Genotype'][df['Genotype'] == genotype][df_short['Sex'] == 'female'],\n\t\t\t\t\t\t\t\t\ty=df_short[col][df_short['Genotype'] == genotype][df_short['Sex'] == 'female'], name=genotype+' female',\n\t\t\t\t\t\t\t\t\t#legendgroup='F', scalegroup='F', #name='F',\n\t\t\t\t\t\t\t\t\tbox_visible=True, meanline_visible=True,line_color='orange'))\n\n\t\tfig.update_traces(box_visible=True, meanline_visible=True, points='all', 
jitter=0.05)\n\t\tfig.update_layout(violinmode='group')\n\t\tfig.update_layout(\n\t\t\t#title=col + \" p = \" + str(dfind[col][0]) + ' '+ \"genotype:\" + str(p_value_g)+ ' ' + \"genotype and sex\" + str(p_value_gs),\n\t\t title= col + \" p = \" + \"{:f}\".format(p_value_g),\n\t\t xaxis_title=\"Genotype\",\n\t\t yaxis_title=\"FA (AU)\",\n\t\t)\n\t\t#fig.update_layout(template=\"plotly_white\")\n\t\t#fig.show()\n\t\tfig.write_image(pathout+col+\"_fa_sex_rawMF.png\")\n\n\ndf_aov.to_csv(pathout+\"fa_sex_punc_t.csv\")\n\n\n","sub_path":"violins_fa_fstats_E3E4groups.py","file_name":"violins_fa_fstats_E3E4groups.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"362233409","text":"import elasticsearch_functions\nimport send_attack_msg\nimport get_config_data\nfrom datetime import datetime\n\n\nMIN_REQUESTS_IN_ATTACK = 4\nMIN_REQUESTS_FOR_CHECK = 2\n\n\ndef get_requests():\n \"\"\"\n Return the requests.\n :param: none.\n :return: the requests.\n :rtype: array.\n \"\"\"\n requests = []\n\n try:\n with open('requests.txt', 'r') as file:\n requests = file.read().split('\\n')\n except FileNotFoundError as e:\n pass\n\n if (len(requests)) and (requests[-1] == ''):\n requests.pop()\n\n return requests\n\n\ndef count_requests(request):\n \"\"\"\n Find the last requests in requests.txt.\n :param request: the request to count.\n :type request: string.\n :return: The number of times the request appears.\n :rtype: int.\n \"\"\"\n requests = get_requests()\n return requests.count(request)\n\n\ndef get_last_user_requests():\n \"\"\"\n Find the last requests in requests.txt.\n :param: none.\n :return: the last request at this moment.\n :rtype: array.\n \"\"\"\n requests = get_requests()\n ret_requests = []\n\n if len(requests) >= MIN_REQUESTS_FOR_CHECK:\n ret_requests = [requests[-1], requests[-2]]\n\n for request in requests:\n if (request.split('<')[0] != ret_requests[0].split('<')[0]) and (\n request.split('> ')[-1] == ret_requests[0].split('> ')[-1]):\n ret_requests[1] = request\n\n return ret_requests\n\n\ndef check_request():\n \"\"\"\n Find if brute force is happening.\n :param: none.\n :return: none.\n \"\"\"\n requests_len = len(get_requests())\n print(\"Restart the requests system\")\n\n while True:\n new_requests_len = len(get_requests())\n\n if (new_requests_len > 0) and (new_requests_len > requests_len):\n requests_len = new_requests_len\n requests = get_last_user_requests()\n\n if (count_requests(requests[0]) > MIN_REQUESTS_IN_ATTACK) and (\n count_requests(requests[1]) < MIN_REQUESTS_IN_ATTACK):\n elasticsearch_functions.add_attack('BruteForce', str(requests[0].split(\"'\")[-2]))\n send_attack_msg.send_attack_msg('BruteForce', str(requests[0].split(\"'\")[-2]))\n print(f\"attack: Brute Force {requests[0]}\")\n\n\ndef add_request(request, ip):\n \"\"\"\n Add request for requests.txt.\n :param request: summary request.\n :type request: string.\n :param ip: user ip.\n :type ip: string.\n :return: none.\n \"\"\"\n server_port = get_config_data.get_server_port()\n proxy_port = get_config_data.get_proxy_port()\n\n with open(\"requests.txt\", \"a+\") as file: # requests.txt - file for the attacks detection\n file.write(f\"{datetime.now().strftime('%d/%m/%Y %H:%M:%S')}\\t{request.replace(str(proxy_port), str(server_port))} {ip}\\n\")\n\n","sub_path":"proxy/bruteForce_dos_sessionPrediction_detection.py","file_name":"bruteForce_dos_sessionPrediction_detection.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"168634526","text":"__author__ = 'razhong'\n\n\n#old_channels = [line.rstrip('\\n') for line in open('/Users/razhong/Workspace/Data/channels/full_channels_before.txt')]\nold_channels = [line.rstrip('\\n') for line in open('/Users/razhong/Workspace/Data/locationsSql/channel.txt')]\nnew_channels = [line.rstrip('\\n') for line in open('/Users/razhong/Workspace/Data/channels/channels_uniq.txt')]\n\nold_channels_nocat = []\n\nchannel_id = dict()\nfor line in old_channels:\n values = line.split(\"\\t\")\n if values[5] == \"category\" or values[5] == \"subcategory\" or values[1]==\"1\":\n continue\n old_channels_nocat.append(values[3])\n channel_id[values[3]] = values[0]\n\njoined_channels = set(old_channels_nocat).intersection(new_channels)\njoined_channels = list(joined_channels)\njoined_channels.sort()\nwith open('/Users/razhong/Workspace/Data/channels/channels_query.edit0.txt', 'w') as output_file:\n for line in joined_channels:\n output_file.write(line + \"\\t\" + channel_id[line] + \"\\n\")\n\n","sub_path":"python/src/category/join_words_channels.py","file_name":"join_words_channels.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"581919151","text":"import pandas as pd\nimport numpy as np\nimport os \nimport re\nfrom operator import itemgetter\nfrom datetime import datetime\nimport csv\n\n\ndef prepare_data (filename):\n \n #set directory path of current script\n ospath = os.path.dirname(__file__) \n\n #specify relative path to data files\n datadir = 'data/1_output/'\n\n #path for demo purpose\n demodir = 'data/demo/'\n\n #full path to data files - !! Change demodir to datadir for running with all PDFs !!\n datapath = os.path.join(ospath, demodir)\n\n #read raw data csv\n data = pd.read_csv(datapath + filename, encoding='utf-8-sig', index_col = 0)\n\n #Preprocessing\n data ['word'] = data ['word'].astype(str)\n data ['word_low'] = data['word'].str.lower()\n\n return data\n\n\ndef create_output (data, filename):\n\n #set directory path of current script\n ospath = os.path.dirname(__file__) \n\n #specify relative path to data files\n datadir = 'data/1_output/'\n\n #path for demo purpose\n demodir = 'data/demo/'\n\n #full path to data files - !! Change demodir to datadir for running with all PDFs !!\n datapath = os.path.join(ospath, demodir) \n\n data.to_csv(datapath + filename, encoding='utf-8-sig')\n\n\ndef chap_identifier (data):\n #Add new colum for status if word is part of company name\n data['chapter'] = np.nan\n\n chapters = ['1','2','3','4','5','6','7','8',\n '1.','2.','3.','4.','5.','6.','7.','8.']\n\n chapters_start_words = [':', 'Bezeichnung','Mögliche','Zusammensetzung','Erste-Hilfe-Maßnahmen','Maßnahmen','Handhabung','Begrenzung',\n 'BEZEICHNUNG','MÖGLICHE','ZUSAMMENSETZUNG','ERSTE-HILFE-MAßNAHMEN','MAßNAHMEN','HANDHABUNG','BEGRENZUNG']\n\n\n #Beginning numbers of subchapter as stop words\n stop_list = ['1.1', '2.1', '3.1','4.1','5.1','6.1', '7.1', '8.1']\n\n #Loop through all rows in data\n for row in data.loc[:, ['word']].itertuples(index=True):\n if (row.word == 'ABSCHNITT' and ('Bold' in data.loc[row.Index,'font_name'] or int(data.loc[row.Index,'font_size']) > 10)) or (row.word in chapters and 'Bold' in data.loc[row.Index,'font_name'] and data.loc[row.Index+1,'word'] in chapters_start_words):\n #first token of chapter header\n i = 0\n\n #check until word is not in the same format and font size \n while True:\n i += 1\n \n #help variable for stop word search\n stop_word = False\n\n # if beginning of subchapter --> end of header\n for stp in stop_list:\n if stp in data.loc[row.Index + i,'word']:\n stop_word = True\n #break from inner stp loop\n break\n\n #break from outer while loop\n if stop_word == True:\n break\n\n # if word is in different format --> end of header\n if data.loc[row.Index+i,'font_name'] != data.loc[row.Index,'font_name'] or data.loc[row.Index+i,'font_size'] != data.loc[row.Index,'font_size']: \n break\n \n #Set indicator if word is part of chapter label\n data.loc[row.Index:row.Index+(i-1), 'chapter'] = 'chapter'\n return data\n\ndef subchapter_identifier (data):\n #Add new colum for status if word is part of company name\n data['subchapter'] = np.nan\n\n subschapter = ( '1.1','1.2','1.3','1.4',\n '2.1','2.2','2.3',\n '3.1','3.2',\n '1.1.','1.2.','1.3.','1.4.',\n '2.1.','2.2.','2.3.',\n '3.1.','3.2.', \n )\n\n subchapter_first_words = [['Produktidentifikator'], #1.1 -0 \n ['Relevante','identifizierte'], #1.2 -1\n ['Einzelheiten','zum'], #1.3 -2\n ['Notrufnummer'], #1.4 -3\n ['Einstufung','des'], #2.1 -4\n ['Kennzeichnungselemente'], #2.2 -5\n ['Sonstige','Gefahren'], #2.3 -6\n ['Stoffe'], #3.1 -7\n ['Gemische']] #3.2 -8\n\n subchapter_last_words = 
[['Produktidentifikator'], #1.1 -0 \n ['wird'], #1.2 -1\n ['bereitstellt'], #1.3 -2\n ['Notrufnummer'], #1.4 -3\n ['Gemischs','Gemisches'], #2.1 -4\n ['Kennzeichnungselemente'], #2.2 -5\n ['Gefahren'], #2.3 -6\n ['Stoffe'], #3.1 -7\n ['Gemische']] #3.2 -8\n\n #Last words of subchapter headers as stop words\n stop_list = ['Produktidentifikator','1272/2008','bereitstellt','Notrufnummer', 'Gemisches','Gemischs','Kennzeichnungselemente']\n\n\n\n #Loop through all rows in data\n for row in data.loc[:, ['word']].itertuples(index=True):\n #either subchapter numbers \n if row.word in subschapter:\n\n #first token of subchapter header \n \n i = 0\n\n #check until word is not in the same format and font size \n while True:\n i += 1\n\n #help variable for stop word search\n stop_word = False\n\n # if last word of header --> break\n for stp in stop_list:\n if stp == data.loc[row.Index + i,'word']:\n stop_word = True\n #break from inner stp loop\n break\n\n #break from outer while loop\n if stop_word == True:\n break\n\n # if word is in different format --> end of header\n # comapere with Index + 1 (skip in this case the number, since number is often smaller font size or not bold)\n if data.loc[row.Index+i,'font_name'] != data.loc[row.Index + 1,'font_name'] or data.loc[row.Index+i,'font_size'] != data.loc[row.Index + 1,'font_size']: \n #i - 1 because word with different format is not part of header\n i = i - 1\n break\n \n #Set indicator if word is part of chapter label\n data.loc[row.Index:row.Index+(i), 'subchapter'] = 'subchapter'\n\n else:\n # if no number for subchapter availabe --> look for words\n for w in range(9):\n #sometimes only one word long\n if w in (0,3,5,7,8):\n if row.word == subchapter_first_words[w][0] and 'Bold' in data.loc[row.Index,'font_name']:\n #Set indicator if word is part of chapter label\n data.loc[row.Index, 'subchapter'] = 'subchapter'\n\n # sometimes more than one word \n elif w in (1,2,4,6):\n if row.word == subchapter_first_words[w][0] and data.loc[row.Index+1,'word'] == subchapter_first_words[w][1] and 'Bold' in data.loc[row.Index,'font_name']:\n i = 0\n while True:\n i += 1\n\n #help variable for stop word search\n stop_word = False\n # if last word of header --> break\n if w == 4:\n if data.loc[row.Index+i,'word'] == subchapter_last_words[w][0] or data.loc[row.Index+i,'word'] == subchapter_last_words[w][1]:\n stop_word = True\n else:\n if data.loc[row.Index+i,'word'] == subchapter_last_words[w][0]:\n stop_word = True\n #break from outer while loop\n if stop_word == True:\n break\n\n if data.loc[row.Index+i,'font_name'] != data.loc[row.Index,'font_name'] or data.loc[row.Index+i,'font_size'] != data.loc[row.Index,'font_size']: \n #i - 1 because word with different format is not part of header\n i = i - 1\n break\n\n #Set indicator if word is part of chapter label\n data.loc[row.Index:row.Index+(i), 'subchapter'] = 'subchapter'\n\n return data\n\n\n\ndef chemicals_identifier (data):\n\n #set directory path of current script\n ospath = os.path.dirname(__file__) \n\n #specify relative path to chem dict file\n chemicals_dir = 'data/3_chemicals/'\n\n #full path to chem dict file\n chempath = os.path.join(ospath, chemicals_dir)\n\n with open(chempath + 'cas_chemical_mapping.csv') as f:\n next(f) # Skip the header\n reader = csv.reader(f, skipinitialspace=True)\n cas_mapping = dict(reader)\n \n #print(cas_mapping)\n\n data['chem'] = np.nan\n stop_list = ['ABSCHNITT','Erste-Hilfe-Maßnahmen']\n\n '''\n filter out relevant data (only data in chapter 3.2)\n '''\n\n for row in 
data.loc[:, ['word']].itertuples(index=True):\n\n #search for start of chapter 3.2\n if row.word == '3.2' or row.word == '3.2.' or (row.word == 'Zusammensetzung' and data.loc[row.Index + 1,'word'] == '/' and str(data.loc[row.Index - 1,'word']) == '3'):\n #first word of 3.2\n i = 0\n while True:\n i += 1\n \n #help variable for stop word search\n stop_word = False\n\n # if beginning of subchapter --> end of header\n for stp in stop_list:\n if stp == data.loc[row.Index + i,'word']:\n stop_word = True\n #break from inner stp loop\n break\n\n #break from outer while loop\n if stop_word == True:\n break\n\n #Set indicator if word is part of chapter label\n data.loc[row.Index:row.Index+(i-1), 'chapter 3.2'] = 1\n\n doc = ''\n\n \n # use regex to identify CAS numbers\n for row in data.loc[data['chapter 3.2'] == 1, ['word']].itertuples(index=True):\n #regex\n if re.match(r\"\\d{4}-\\d{2}-\\d{1}$\", str(row.word)) or re.match(r\"\\d{3}-\\d{2}-\\d{1}$\", str(row.word)) or re.match(r\"\\d{5}-\\d{2}-\\d{1}$\", str(row.word)) or re.match(r\"\\d{6}-\\d{2}-\\d{1}$\", str(row.word)):\n\n # check if start of new doc\n if data.loc[row.Index, 'doc'] != doc:\n i = 1\n doc = data.loc[row.Index, 'doc']\n\n #label cas nr according to current document\n data.loc[row.Index, 'chem'] = int(str(31) + str(i))\n\n #save cas nr\n cas_nr = str(row.word)\n #look in dict for corresponding name of cas nr\n cas_name = str(cas_mapping[cas_nr])\n\n # look for cas names in both directions\n for k in range(30):\n #current tokens for both directions\n token_down = str(data.loc[row.Index+k,'word']).lower()\n token_up = str(data.loc[row.Index-k,'word']).lower()\n\n # if token is substring of cas_name --> label as name (only if len >2 or digit)\n if token_down in cas_name.lower() and len(token_down) > 2:\n data.loc[row.Index+k, 'chem'] = int(str(32) + str(i))\n # if not: check if substrings of token with len of 3 occur in cas_name\n elif len(token_down) > 3:\n sliding_tokens = [token_down[i:i+4] for i in range(len(token_down)-3)]\n if any(token in cas_name.lower() for token in sliding_tokens):\n data.loc[row.Index+k, 'chem'] = int(str(32) + str(i))\n\n # if token is substring of cas_name --> label as name (only if len >2 or digit)\n if token_up in cas_name.lower() and len(token_up) > 2:\n data.loc[row.Index-k, 'chem'] = int(str(32) + str(i))\n # if not: check if substrings of token with len of 3 occur in cas_name\n elif len(token_up) > 3:\n sliding_tokens = [token_up[i:i+4] for i in range(len(token_up)-3)]\n if any(token in cas_name.lower() for token in sliding_tokens):\n data.loc[row.Index-k, 'chem'] = int(str(32) + str(i))\n\n #look for % and ranges in both directions\n j = 0\n\n while True:\n j += 1\n\n #sd 47, 57, 61, 82, 98\n\n if j == 100:\n break\n \n if data.loc[row.Index-2,'word'] == '-':\n data.loc[row.Index -3:row.Index -1 ,'chem'] = int(str(33) + str(i))\n break\n\n if data.loc[row.Index+2,'word'] == '-':\n #print(row.Index)\n data.loc[row.Index +1:row.Index + 3 ,'chem'] = int(str(33) + str(i))\n break\n\n if data.loc[row.Index+1,'word'] == '%':\n data.loc[row.Index + 3, 'chem'] = int(str(33) + str(i))\n break\n\n if data.loc[row.Index+j,'word'] == '<':\n data.loc[row.Index +j:row.Index +j + 1, 'chem'] = int(str(33) + str(i))\n break\n\n #look for % ratio around CAS number\n if data.loc[row.Index-j,'word'] == '%':\n data.loc[row.Index -j, 'chem'] = int(str(33) + str(i))\n\n #Look for all the digits assigned to the % symbol\n t = 0\n while True:\n t += 1\n # stop if '<' or '≤'\n if data.loc[row.Index -j -t, 'word'] in ('<', '≤'):\n # if <, check if token before is '-' --> also range --> take token before as well\n if data.loc[row.Index -j -t - 1, 'word'] == '-':\n data.loc[row.Index - j - t - 2:row.Index-j, 'chem'] = int(str(33) + str(i))\n else:\n data.loc[row.Index - j - t:row.Index-j, 'chem'] = int(str(33) + str(i))\n break\n # if '-' found: range --> label until one token before '-'\n if data.loc[row.Index -j -t, 'word'] == '-':\n data.loc[row.Index - j - (t+1):row.Index-j, 'chem'] = int(str(33) + str(i))\n break\n #stop after looking at more than 10 tokens\n if t == 10:\n data.loc[row.Index -j -1, 'chem'] = int(str(33) + str(i))\n break\n break\n\n if data.loc[row.Index+j,'word'] == '%':\n data.loc[row.Index +j, 'chem'] = int(str(33) + str(i))\n\n #Look for all the digits assigned to the % symbol\n t = 1 \n while True:\n # if '<' or '≤'\n if data.loc[row.Index +j -t, 'word'] in ('<', '≤'):\n # if <, check if token before is '-' --> also range --> take token before as well\n if data.loc[row.Index +j -t - 1, 'word'] == '-':\n data.loc[row.Index + j - t - 2:row.Index+j, 'chem'] = int(str(33) + str(i))\n else:\n data.loc[row.Index + j - t:row.Index+j, 'chem'] = int(str(33) + str(i))\n break\n # if '-' found: range --> label until one token before '-'\n if data.loc[row.Index +j -t, 'word'] == '-':\n data.loc[row.Index + j - (t+1):row.Index+j, 'chem'] = int(str(33) + str(i))\n break\n\n #stop after looking at more than 10 tokens\n if t == 10:\n data.loc[row.Index +j -1, 'chem'] = int(str(33) + str(i))\n break\n t += 1\n break\n i += 1\n\n return data\n\n\ndef company_identifier (data):\n #Labels\n legal_forms = ['gmbh', 'ug', 'ag', 'gbr', 'e.k.', 'ohg', 'ohg', 'kg', 'se', 'lp', 'llp', 'llp', 'lllp', 'llc', 'lc', 'ltd. co', 'pllc', 'corp.', 'inc.', 'corp', 'inc', 'kluthe', 's.l.', 'bvba']\n stop_list = ['firmenname', 'firmenbezeichnung','lieferanschrift', 'lieferant', 'der', ':', ')', 'zum', 'hersteller', 'inverkehrsbringer', '*', 'firma']\n exclusion_list = ['vergiftungsinformationszentrale', 'tüv', 'website', 'gbk', '@', 'www']\n\n #Add new column for company name\n data['company_name'] = np.nan\n #Add new column for status if word is part of company name\n data['company'] = np.nan\n\n #Loop through all rows in data\n for row in data.loc[data['Page'] <= 2, ['word_low']].itertuples(index=True):\n #search for legal form\n for lf in legal_forms:\n if lf in row.word_low:\n #start from here building string\n company_str = row.word_low\n i = 0\n\n #help variable for excluded string\n excluded_str = False\n\n #check for words in the same line\n while True:\n i += 1\n #help variable for stop word search\n stop_word = False\n #check line of previous word\n if data.loc[row.Index-i,'ycord_average'] != data.loc[row.Index,'ycord_average']:\n break\n #check stop list\n for stp in stop_list:\n if stp in data.loc[row.Index-i,'word_low']:\n stop_word = True\n #break from inner stp loop\n break\n #check exclusion list\n for el in exclusion_list: \n if (data.loc[row.Index-i,'word_low'].find(el) != -1) or (company_str.find(el) != -1):\n excluded_str = True\n #break from inner el loop\n break\n \n #break from outer while loop\n if stop_word == True:\n break\n #Add word to string\n company_str += ' ' + data.loc[row.Index-i,'word_low']\n \n if excluded_str == False:\n #Reverse order of strings\n company_str_r = ' '.join(company_str.split(\" \")[-1::-1])\n #Set value for extracted company name\n data.loc[row.Index-i+1:row.Index, 'company_name'] = company_str_r\n #Set indicator if word is part of company name\n data.loc[row.Index-i+1:row.Index+1, 
'company'] = 'company'\n #break from outer lf loop \n break\n return data\n\n\ndef date_identifier (data):\n #Labels\n date_labels = {\n #1. Print/creation date (Druckdatum/Erstellung)\n 'printdate': ['druck', 'ausgabe', 'ausstellung', 'erstellung', 'sd-datum', 'erstellt', 'ausgestellt'],\n #2. Revision date (Überarbeitungsdatum)\n 'revisiondate': ['überarbeit', 'änderung', 'revision', 'bearbeitung', 'quick-fds'],\n #3. Date of the old version (Datum alte Version)\n 'oldversiondate': ['ersetzt', 'ersatz', 'fassung', 'letzten'],\n #4. Validity date (Gültigkeitsdatum)\n 'validdate': ['kraft', 'freigabe'],\n #5. Negative Exceptions\n np.nan: ['sblcore', 'artikel']\n #6. All other cases not explicitly listed are also printdate\n }\n\n #Add new columns for feature generation\n data['date_nr'] = np.nan\n data['date_string'] = np.nan\n data['date'] = np.nan\n data['date_stopword'] = np.nan\n\n #Filter out special characters for simpler trigger detection\n # & (data['Page'] == 1)\n data_iter = pd.DataFrame(data.loc[(data['special_char'] <1) & (data['Page'] == 1)])\n #Update work index + save old index\n data_iter.reset_index(inplace=True)\n\n\n #Iterate through dataframe\n for row in data_iter.itertuples(index=True):\n \n # Catch exception with subchapter numbers\n if row.word_low.endswith('.0'):\n continue\n for fmt in (#all short/long combinations with dot format\n '%d.%m.%Y', '%d.%m.%y', '%w.%m.%Y', '%w.%m.%y', '%d.%-m.%Y', '%d.%-m.%y','%w.%-m.%Y','%w.%-m.%y', \n '%Y.%m.%d', '%y.%m.%d', '%Y.%m.%w', '%y.%m.%w', '%Y.%-m.%d', '%y.%-m.%d','%Y.%-m.%w','%y.%-m.%w',\n #all short/long combinations with hyphen format\n '%d-%m-%Y', '%d-%m-%y', '%w-%m-%Y', '%w-%m-%y', '%d-%-m-%Y', '%d-%-m-%y','%w-%-m-%Y','%w-%-m-%y', \n '%Y-%m-%d', '%y-%m-%d', '%Y-%m-%w', '%y-%m-%w', '%Y-%-m-%d', '%y-%-m-%d','%Y-%-m-%w','%y-%-m-%w',\n #all short/long combinations with slash format\n '%d/%m/%Y', '%d/%m/%y', '%w/%m/%Y', '%w/%m/%y', '%d/%-m/%Y', '%d/%-m/%y','%w/%-m/%Y','%w/%-m/%y',\n '%Y/%m/%d', '%y/%m/%d', '%Y/%m/%w', '%y/%m/%w', '%Y/%-m/%d', '%y/%-m/%d','%Y/%-m/%w','%y/%-m/%w',\n #all integer/text combinations \n '%d. %b %y', '%d %b %y', '%d. %B %y', '%d %B %y', '%w. %b %y', '%w %b %y', '%w. %B %y', '%w %B %y'\n ):\n try:\n s = str(row.word_low)\n \n #Try to parse string in date\n date = datetime.strptime(s, fmt).date()\n\n #Prevent picking wrong dates\n if date < date.today():\n org_index = int(data_iter.loc[row.Index, 'index'])\n data.loc[org_index, 'date_nr'] = date\n #Catch 5 words before date\n date_str = ''\n for i in range(5,0,-1): \n date_str += data_iter.loc[row.Index-i, 'word_low'] + ' '\n data.loc[org_index, 'date_string'] = date_str\n\n #search in string for label key words\n temp = []\n for key, value in date_labels.items():\n for i in value:\n temp.append((key, date_str.find(i)))\n #if substring was found value is >= 0\n if max(temp, key=itemgetter(1))[1] >= 0:\n date_label = max(temp, key=itemgetter(1))[0]\n\n else:\n date_label = 'printdate'\n # create label in working csv\n data.loc[org_index, 'date'] = date_label\n\n # Add stopword label\n stop = False\n for i in range(1,6):\n if stop == True:\n break\n for key, value in date_labels.items():\n for j in value:\n if data_iter.loc[row.Index-i, 'word_low'].find(j) != -1:\n data.loc[int(data_iter.loc[row.Index-i, 'index']),'date_stopword'] = key\n stop = True\n break\n\n except (ValueError, TypeError):\n continue\n\n return data\n\n\ndef directive_identifier (data):\n #Labels\n reach_id = ['1907/2006', '2015/830']\n\n #Add new column for the directive status\n data['directive'] = np.nan\n\n\n #Loop through all rows in data\n for row in data.loc[data['Page'] <= 2, ['word_low']].itertuples(index=True):\n #search for reach_id\n for rid in reach_id:\n if row.word_low.find(rid) != -1:\n data.loc[row.Index, 'directive'] = 'directive'\n\n return data\n\n\ndef signal_identifier (data):\n #Labels\n reach_id = ['signalwort']\n reach_range = ['achtung', 'warnung', 'gefahr'] #,'entfällt'\n\n\n #Add new column for the signal word status\n data['signal'] = np.nan\n\n #Filter out special characters for simpler trigger detection\n data_iter = pd.DataFrame(data.loc[data['special_char'] <1])\n #Update work index + save old index\n data_iter.reset_index(inplace=True)\n\n #Loop through all rows in data\n for row in data_iter.itertuples(index=True):\n #search for reach_id\n for rid in reach_id:\n if row.word_low.find(rid) != -1:\n i = 0\n keepsearching = True\n while keepsearching:\n i +=1\n signal = data_iter.loc[row.Index+i, 'word_low']\n for rng in reach_range:\n if rng == signal:\n temp = int(data_iter.loc[row.Index+i, 'index'])\n data.loc[temp, 'signal'] = 'signal'\n keepsearching = False\n return data\n\n\ndef usecase_identifier (data):\n # Detects start and end of the whole usecase part\n usecase_start = ['abgeraten wird']\n usecase_stop = ['einzelheiten zum', \n '1.3. 
angaben',\n '1.3 angaben',\n 'angaben des',\n '1.3 hersteller',\n '1.3 nationaler'\n ]\n\n # Detects the starting points within the usecasepart of pro usecases (=1) and con usecases (=0)\n trigger_start = [\n ('empfohlene r verwendungszweck e', 1),\n ('empfohlene verwendung', 1),\n ('verwendungen des stoffs oder gemischs', 1),\n ('verwendung des stoffes des gemischs',1),\n ('verwendung des stoffs des gemischs', 1),\n ('verwendung des stoffes oder des gemisches', 1), \n ('verwendung des stoffs oder des gemischs', 1),\n ('verwendung des stoffes oder gemisches', 1), \n ('verwendung des stoffs oder gemischs', 1),\n ('verwendung des stoffes des gemisches', 1), \n ('verwendung des stoffs des gemischs',1),\n ('identifizierte verwendungen', 1),\n ('identifizierte anwendungen', 1),\n #('funktions- oder verwendungskategorie', 1),\n ('relevante verwendungen', 1),\n #('verwendungssektor', 1),\n ('bestimmte verwendung der mischung',1),\n ('vorgesehene verwendung', 1),\n ('nicht empfohlene verwendung der mischung', 0),\n ('verwendungen von denen abgeraten wird', 0),\n ('nicht vorgesehene verwendung', 0),\n ('abgeratene verwendungen', 0),\n ('abgeratene anwendungen', 0)\n ]\n # Detects the ending points within the usecasepart of pro usecases (=1) and con usecases (=0) // Note: new usecase introduction could be endingpoint of previous usecase section\n\n ''' 'empfohlene r verwendungszweck e',\n 'empfohlene verwendung',\n 'verwendungen des stoffs oder gemischs',\n 'verwendung des stoffs des gemischs',\n 'verwendung des stoffes oder des gemisches', \n 'verwendung des stoffs oder des gemischs',\n 'verwendung des stoffes oder gemisches', \n 'verwendung des stoffs oder gemischs',\n 'verwendung des stoffes des gemisches', \n 'verwendung des stoffs des gemischs',\n 'identifizierte verwendungen',\n 'identifizierte anwendungen',\n 'funktions- oder verwendungskategorie',\n 'relevante verwendungen',\n 'verwendungssektor', '''\n\n trigger_end = [\n #'verwendungssektor',\n 'nicht empfohlene verwendung der mischung',\n 'verwendungen von denen abgeraten wird',\n 'vorgesehene verwendung',\n 'abgeratene verwendungen',\n 'abgeratene anwendung',\n\n #'produktkategorie', \n #'kontaktieren sie ihren lieferanten für weitere informationen',\n #'es sind keine verwendungen bekannt',\n #'zur zeit liegen keine Informationen hierzu vor',\n 'wirkung des stoffes',\n #'keine weitere information vorhanden', \n #'keine weitere information vorhanden',\n #'keine weiteren relevanten informationen verfügbar',\n #'keine bekannt',\n #'bestimmt für die allgemeinheit',\n #'hauptverwendungskategorie',\n #'zur zeit',\n '1.2.2',\n '1.3',\n 'einzelheiten zum',\n 'angaben des lieferanten',\n ]\n\n #Add new column for part string\n data['usecase_part'] = np.nan\n #Add new column for usecase\n data['usecase'] = np.nan\n\n\n\n #Filter out special characters for simpler trigger detection\n data_iter = pd.DataFrame(data.loc[data['special_char'] <1])\n #Update work index + save old index\n data_iter.reset_index(inplace=True)\n\n #start with index 1 because of sliding window (range = 2)\n index = 1\n\n length = len(data_iter['index'])-1\n\n while index < length:\n \n row = data_iter.loc[index, :]\n\n yo = row.doc\n\n # Sliding window of start point\n start_str = data_iter.loc[index-1, 'word_low'] + ' ' + row.word_low\n\n #search for starting point\n for start in usecase_start:\n if start in start_str:\n #start from here building string\n usecase_str = ''\n\n #add words till stop\n i = index\n while True:\n i += 1\n recording = True\n stop_str = 
data_iter.loc[i+1,'word_low'] + ' ' + data_iter.loc[i+2,'word_low']\n for stop in usecase_stop:\n if stop in stop_str:\n recording=False\n if recording == False:\n break\n usecase_str = usecase_str + ' ' + data_iter.loc[i, 'word_low']\n # search corresponding index of unfiltered dataframe\n temp1 = int(data_iter.loc[i, 'index'])\n temp4 = int(data_iter.loc[index, 'index'])\n # add usecase string with the whole part to last index of part\n data.loc[temp4+1:temp1, 'usecase_part'] = usecase_str\n \n \n #start searching for usecases from this position\n detect_start = ''\n keepsearching = True\n end_index = 0\n #j = index+1\n j = index\n while j < i:\n keepsearching = True\n # build string\n detect_start = detect_start + ' ' + data_iter.loc[j, 'word_low']\n j +=1\n for trig_st in trigger_start:\n #if trigger was found start searching for the end of this (sub-)part\n if detect_start.find(trig_st[0]) != -1:\n detect_end = ''\n k = j\n while keepsearching:\n k +=1\n detect_end = detect_end + ' ' + data_iter.loc[k, 'word_low']\n\n for trig_en in trigger_end:\n # if end trigger was found, return last index of part\n if detect_end.find(trig_en) != -1:\n # last index of part is overall number of words in the string minus the length of the trigger\n end_index = k-len(trig_en.split())\n # convert the found range in the range of the unprocessed dataframe\n temp2 = int(data_iter.loc[j, 'index'])\n temp3 = int(data_iter.loc[end_index, 'index'])\n # check if part is pro or con usecase\n if trig_st[1] == 1:\n helpcheck = data_iter.loc[j, 'word_low'] + ' ' + data_iter.loc[j+1, 'word_low']\n if helpcheck == 'des stoffs':\n temp2 = int(data_iter.loc[j+4, 'index'])\n data.loc[temp2:temp3, 'usecase'] = 'usecase_pro'\n else:\n data.loc[temp2:temp3, 'usecase'] = 'usecase_con'\n else:\n data.loc[temp2:temp3, 'usecase'] = 'usecase_con'\n j = end_index\n keepsearching = False\n detect_start = ''\n break\n break\n #j +=1\n \n index = j\n break\n index +=1\n\n return data\n\n\ndef version_identifier (data):\n #get list of all words\n words = list(data['word'])\n\n version_dict = {\n 'Versionsnummer', 'Version', 'Versionsnummer:' , 'Version:' , 'Revisions-Nr:' , 'Revisions-Nr',\n 'Revisions-nr' , 'Revisionsnummer' , 'Revisionsnummer:', 'Rev-Nr:' , 'Version-Nr'\n }\n\n l1 = []\n\n for c in version_dict:\n\n for i, e in enumerate(words): \n if words[i-1] == c:\n if words[i] == '(': # version number, with the old version number in parentheses\n l1.append(i+4)\n elif words[i] == ':': # if there is a space between 'Version' and ':' -> take the next token \n if words[i-4]== 'Überarbeitet':\n l1.append(i+3)\n elif words[i-5] != 'Ersetzt' and words[i-3] != 'Ersetzt':\n l1.append(i+1)\n elif words[i] == '.': \n l1.append(i+2)\n else:\n l1.append(i)\n \n # fill in identified labels in data\n data['version'] = np.nan\n for j in l1:\n data.loc[j,'version'] = 'version'\n\n return data\n\n\ndef combine_labels (data):\n \n labels = [\n 'chapter',\n 'subchapter',\n 'date',\n 'version',\n 'directive',\n 'signal',\n 'usecase',\n 'chem',\n 'company' \n ]\n \n #data['label'] = np.nan\n data['label'] = ''\n\n for l in labels:\n data[\"label\"] = data[\"label\"] + data.loc[:, l].astype(str).replace('nan', '')\n \n #for l in labels:\n #data['label'] = data['label'].str.cat(data[l])\n\n\n '''\n for row in data.loc[:, ['label']].itertuples(index=True):\n for label in labels:\n temp = data.loc[row.Index, label]\n if pd.isna (temp) == False:\n data.loc[row.Index, 'label'] = temp\n break\n '''\n \n '''\n data['label'] = np.nan\n \n data['nr_labels'] = np.nan\n\n 
data['nr_labels'] = data[labels].apply(lambda x: x.notnull().sum(), axis='columns')\n\n data['label'] = data[labels].max(1) \n \n #data = data.drop(labels, 1)\n '''\n \n return data\n\ndef main ():\n \n data = prepare_data('01_data.csv')\n \n #Select identifiers to run\n identifier = [\n chap_identifier, \n subchapter_identifier, \n date_identifier,\n version_identifier, \n directive_identifier, \n signal_identifier, \n usecase_identifier, \n chemicals_identifier,\n company_identifier \n ]\n \n for i in identifier:\n print ('********** Start: ' + i.__name__ + ' **********')\n \n data = i (data)\n\n print ('********** End: ' + i.__name__ + ' **********')\n \n combine_labels(data)\n \n create_output(data, '02_data.csv')\n\n \nif __name__ == '__main__':\n main()\n","sub_path":"2_labeling_scripts.py","file_name":"2_labeling_scripts.py","file_ext":"py","file_size_in_byte":37171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"176667871","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import osv,fields\nfrom datetime import date\nfrom openerp.tools.translate import _\n\nclass product_product(osv.osv):\n _name = \"product.product\"\n _inherit = \"product.product\"\n\n def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):\n context = context or {}\n if context.get('search_default_elote_id'):\n #\n # Por razones no identificadas, esta forma de filtrar los productos dejó de funcionar, posiblemente al agregar\n # la opción de pedidos directos, por lo que se cambió a un query previo\n #\n # args.append((('id', 'inselect',('select PP.id from product_product PP left join product_supplierinfo SI on (SI.product_tmpl_id = PP.product_tmpl_id) where SI.lote_id = %s',\n # context['search_default_elote_id']))))\n\n lot_ids = []\n cr.execute(\n 'select PP.id from product_product PP left join product_supplierinfo SI on (SI.product_tmpl_id = PP.product_tmpl_id) where SI.lote_id = %s',\n (context['search_default_elote_id'],))\n for id in cr.dictfetchall():\n lot_ids.append(id[\"id\"])\n args.append((('id', 'in', lot_ids)))\n if context.get('search_default_partner_id'):\n #\n # Por razones no identificadas, esta forma de filtrar los productos dejó de funcionar, posiblemente al agregar\n # la opción de pedidos directos, por lo que se cambió a un query previo\n #\n # args.append((('id', 'inselect',('select PP.id from product_product PP left join product_supplierinfo SI on (SI.product_tmpl_id = PP.product_tmpl_id) where SI.name = %s',\n # context['search_default_partner_id']))))\n\n lot_ids = []\n cr.execute(\n 'select PP.id from product_product PP left join product_supplierinfo SI on (SI.product_tmpl_id = PP.product_tmpl_id) where SI.name = %s',\n (context['search_default_partner_id'],))\n for id in cr.dictfetchall():\n lot_ids.append(id[\"id\"])\n args.append((('id', 'in', lot_ids)))\n return super(product_product, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)\nproduct_product()\n\nclass product_supplierinfo(osv.osv):\n _name = \"product.supplierinfo\"\n _inherit = \"product.supplierinfo\"\n\n _columns = {\n 'lote_id': fields.many2one('elote.lote', 'Lote'),\n }\nproduct_supplierinfo()\n\nclass elote_lote(osv.osv):\n _name = \"elote.lote\"\n _description = \"Lot Administration class\"\n _inherit = [ 'mail.thread' ]\n\n def _count_all(self, cr, uid, ids, field_name, arg, context=None):\n return {\n obj.id: {\n 'supplier_ids_count': len(obj.supplier_ids),\n 'order_ids_count': len(obj.order_ids),\n }\n for obj in self.browse(cr, uid, ids, context=context)\n }\n\n _columns = {\n 'name': fields.char('Name',size=32),\n 'sequence_nbr': fields.char('Sequence Nbr',size=32),\n 'date_start': fields.date('Start Date'),\n 'date_end': fields.date('Start End'),\n 'state': fields.selection([\n ('draft','Draft'),('open','Open'),('cancel','Cancel'),('close','Closed')\n ],'State'),\n 'user_ids': fields.many2many('res.users', 'res_user_rel', 'lote_id', 'user_id', 'Users'),\n 'supplier_ids': fields.one2many('product.supplierinfo', 'lote_id', 'Supplier Info'),\n 'supplier_ids_count': fields.function(_count_all, type='integer', string=_(\"Supplier Info Count\"), multi='_count_all', store=False),\n 'order_ids': fields.one2many('purchase.order', 'lote_id', 'Purchase Orders'),\n 'order_ids_count': fields.function(_count_all, type='integer', string=_(\"Purchase Order Count\"), multi='_count_all', store=False),\n 'direct': fields.boolean('Direct orders 
lot'),\n }\n\n _defaults = {\n 'state': 'draft'\n }\n\n def do_open(self, cr, uid, ids, context=None):\n this = self.browse(cr, uid, ids)[0]\n lote_ids = self.search(cr, uid, [('state','=','open'),('direct','=',this.direct)])\n if lote_ids:\n raise osv.except_osv('Error!', 'Ya existe un lote abierto.\\nCierrelo antes de abrir uno nuevo.')\n self.write(cr, uid, ids, {'state': 'open'})\n return True\n\n def do_close(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state': 'close'})\n return True\n\n def do_cancel(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state': 'cancel'})\n return True\n\n def do_draft(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state': 'draft'})\n return True\n\n def _check_dates(self, cr, uid, ids, context=None):\n obj = self.browse(cr, uid, ids[0], context=context)\n if obj.date_start > obj.date_end:\n return False\n return True\n\n _constraints = [\n (_check_dates, 'Start date should be less than end date.', ['date_start','date_end']),\n ]\n\n\n def view_supplier_ids(self, cr, uid, ids, context=None):\n res_id = ids and ids[0] or False\n return {\n 'name': _('Supplier Info'),\n 'domain': [('lote_id', 'in', ids)],\n 'res_model': 'product.supplierinfo',\n 'type': 'ir.actions.act_window',\n 'view_id': False,\n 'view_mode': 'tree,form',\n 'view_type': 'form',\n 'limit': 80,\n 'context': \"{'default_lote_id': %s}\" % (res_id)\n }\n\n def view_order_ids(self, cr, uid, ids, context=None):\n res_id = ids and ids[0] or False\n return {\n 'name': _('Purchase Orders'),\n 'domain': [('lote_id', 'in', ids)],\n 'res_model': 'purchase.order',\n 'type': 'ir.actions.act_window',\n 'view_id': False,\n 'view_mode': 'tree,form',\n 'view_type': 'form',\n 'limit': 80,\n 'context': \"{'default_lote_id': %s}\" % (res_id)\n }\n\n def catalog_import(self, cr, uid, ids, context=None):\n return {\n 'name': _('Catalog Import'),\n 'res_model': 'elote.catalog_import',\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'target': 'new',\n 'context': \"{'default_lote_id': %s}\" % (ids[0])\n }\n\nelote_lote()\n","sub_path":"elote_lote/lote.py","file_name":"lote.py","file_ext":"py","file_size_in_byte":6639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"80241256","text":"import os\n\n\n#PART 1 - найти длину ключа = 16 символов\ni = 2\n\nf = open('decrypt_text.txt', 'r')\ntext = f.read()\nf.close\n\n#print(text)\n\ntmp = ''\n\nwhile (i < 25):\n j = 0\n while (j < len(text)):\n tmp = tmp + text[j]\n #print(j)\n j = j + i\n f = open('tmp.txt', 'w')\n f.write(tmp)\n f.close\n print(i)\n os.system('python3 index.py')\n print(tmp)\n print('-----')\n i = i + 1\n tmp = ''\n'''\n#PART 2 - расшифровка\n\nf = open('tmp.txt', 'r')\ntext = f.read()\nf.close\n\ntmp = ''\ni = 0\nwhile (i < 21):\n j = i\n while (j < len(text)):\n tmp = tmp + text[j]\n j = j + 21\n i = i + 1\nprint(tmp)\n'''\n","sub_path":"cp_2/panchuk_fb-74_opanasyuk_fb-74_cp_2/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"203860065","text":"import tensorflow as tf\n\n# gkrtmq data\nx1_data = [1, 0, 3, 0, 5]\nx2_data = [0, 2, 0, 4, 0]\ny_data = [1, 2, 3, 4, 5]\n\n# SET VARIABLES\nW1 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\nW2 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\n\nb = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\n\n# Our hypothesis\nhypothesis = W1 * x1_data + W2 * x2_data + b\n\ncost = tf.reduce_mean(tf.square(hypothesis - y_data))\n\n# Minimize\noptimizer = tf.train.GradientDescentOptimizer(0.1)\n\ntrain = optimizer.minimize(cost)\n\ninit = tf.global_variables_initializer()\n\n# Launch grapth\nsess = tf.Session()\nsess.run(init)\n\nfor step in xrange(2001):\n sess.run(train)\n if step % 20 == 0:\n print(step, sess.run(cost), sess.run(W1), sess.run(W2), sess.run(b))\n","sub_path":"ch04/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"545708619","text":"from gamegrid import *\nfrom java.awt import Point\n\n# ---------------- class Animal ----------------\nclass Animal():\n def __init__(self, imgPath): \n self.imagePath = imgPath \n def showMe(self, x, y): \n bg.drawImage(self.imagePath, x, y)\n\n# ---------------- class Pet ----------------\nclass Pet(Animal): # Von Animal abgeleitete Klasse - man kann auch mehrere Basisklassen verwenden.\n def __init__(self, imgPath, name): \n self.imagePath = imgPath \n self.name = name #Instanzvariable für den Namen\n def tell(self, x, y): #Zusätzliche Methode, um den Namen anzuzeigen\n bg.drawText(self.name, Point(x, y))\n\nmakeGameGrid(600, 600, 1, False)\nsetBgColor(Color.green)\nshow()\ndoRun()\nbg = getBg()\nbg.setPaintColor(Color.black)\n\nfor i in range(5):\n myPet = Pet(\"sprites/pet.gif\", \"Trixi\")\n myPet.showMe(50 + 100 * i, 100) #Methode ist gar nicht in Pet definiert. Aber pet ist auch ein animal (is-a-relation zwischen pet und animal)\n myPet.tell(72 + 100 * i, 145)\n\n","sub_path":"Theorie/OOP/OOP_Programm5_Vererbung.py","file_name":"OOP_Programm5_Vererbung.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"344520804","text":"import datetime as dt\nimport unittest\n\nfrom AShareData.WindData import WindWrapper\n\n\nclass MyTestCase(unittest.TestCase):\n def setUp(self) -> None:\n self.w = WindWrapper()\n self.w.connect()\n\n def test_index_constitute(self):\n hs300_constitute = self.w.get_index_constitute(index='000300.SH')\n print(hs300_constitute)\n\n def test_wsd(self):\n rnd_close = self.w.wsd(['000001.SZ', '000002.SZ'], 'close', '2019-07-01', '2019-07-10', '')\n print('\\n')\n print(rnd_close)\n\n def test_wss(self):\n # data = self.w.wss(['000001.SZ', '000002.SZ', '000005.SZ'], ['SHARE_RTD_STATE', 'SHARE_RTD_STATEJUR'],\n # trade_date='20190715', unit='1')\n # print('\\n')\n # print(data)\n\n data = self.w.wss(['000001.SZ', '000002.SZ', '000005.SZ'], \"open,low,high,close,volume,amt\",\n date='20190715',\n priceAdj='U', cycle='D')\n print('\\n')\n print(data)\n\n # data = self.w.wss(\"000001.SH,000002.SZ\", \"grossmargin,operateincome\", \"unit=1;rptDate=20191231\")\n # print('\\n')\n # print(data)\n\n def test_wset(self):\n data = self.w.wset(\"futurecc\", startdate='2019-07-29', enddate='2020-07-29', wind_code='A.DCE')\n print('\\n')\n print(data)\n\n start_date = dt.date(2020, 6, 30).strftime('%Y-%m-%d')\n end_date = dt.date(2020, 7, 30).strftime('%Y-%m-%d')\n exchange = 'sse'\n wind_code = '510050.SH'\n status = 'all'\n field = 'wind_code,trade_code,sec_name'\n data = self.w.wset(\"optioncontractbasicinfo\", options=f'field={field}', startdate=start_date, enddate=end_date,\n status=status, windcode=wind_code, exchange=exchange)\n print('\\n')\n print(data)\n\n def test_wsq(self):\n data = self.w.wsq('002080.SZ,000002.SZ', 'rt_latest,rt_vol')\n print('\\n')\n print(data)\n data = self.w.wsq('000002.SZ', 'rt_latest,rt_vol')\n print('\\n')\n print(data)\n data = self.w.wsq('002080.SZ,000002.SZ', 'rt_latest')\n print('\\n')\n print(data)\n data = self.w.wsq('000002.SZ', 'rt_latest')\n print('\\n')\n print(data)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/wind_wrapper_test.py","file_name":"wind_wrapper_test.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"449001523","text":"from dcs.planes import MiG_15bis, IL_76MD, IL_78M, An_26B, An_30M, Yak_40\nfrom dcs.ships import CV_1143_5_Admiral_Kuznetsov, Bulk_cargo_ship_Yakushev, Dry_cargo_ship_Ivanov, Tanker_Elnya_160\nfrom dcs.vehicles import AirDefence, Armor, Unarmed, Infantry, Artillery\n\nRussia_1955 = {\n \"country\": \"Russia\",\n \"side\": \"red\",\n \"units\": [\n MiG_15bis,\n\n IL_76MD,\n IL_78M,\n An_26B,\n An_30M,\n Yak_40,\n\n AirDefence.AAA_ZU_23_Closed,\n AirDefence.AAA_ZU_23_on_Ural_375,\n Armor.ARV_BRDM_2,\n Armor.FDDM_Grad,\n Armor.APC_MTLB,\n Armor.MBT_T_55,\n Artillery.MLRS_BM_21_Grad,\n\n Unarmed.Transport_Ural_375,\n Unarmed.Transport_UAZ_469,\n\n CV_1143_5_Admiral_Kuznetsov,\n Bulk_cargo_ship_Yakushev,\n Dry_cargo_ship_Ivanov,\n Tanker_Elnya_160,\n\n # Infantry squad\n Infantry.Paratrooper_AKS,\n Infantry.Infantry_Soldier_Rus,\n Infantry.Paratrooper_RPG_16,\n ]\n}","sub_path":"game/factions/russia_1955.py","file_name":"russia_1955.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"276428320","text":"import turtle, random\r\n\r\n#sets the window\r\nturtle.screensize(canvwidth=1000, canvheight=1000)\r\n\r\n#input the number of rows, size of the squares, and number of petals\r\nrowNumber = int(turtle.numinput(\"Colorful Chessboard and Flower\", \"Enter the number of rows : \",\r\n default=3, minval=3, maxval=10)) #Input The Number Of Rows\r\nsquareSize = int(turtle.numinput(\"Colorful Chessboard and Flower\", \"Enter the square size: \",\r\n default=5, minval=5, maxval=int(500/rowNumber))) #InputThe Square Size\r\npetals = int(turtle.numinput(\"Colorful Chessboard and Flower\", \"Enter the number of petals of the flower : \",\r\n default=3, minval=1, maxval=360)) #InputThe Number Of Petals\r\nnormalizer = int((squareSize*rowNumber)/2) #For Helping Turtle Placement\r\n\r\nturtle.title(\"Colorful Chessboard and Flower\") #Sets The Title Of The Program\r\n\r\n#speed and preparation so everything's centered\r\nturtle.hideturtle() #Hide The Turtle So It Looks Good\r\nturtle.speed('fastest')\r\nturtle.penup()\r\nturtle.goto(-normalizer,normalizer)\r\nturtle.pendown()\r\n\r\n#define the function for the random color\r\nturtle.colormode(255) \r\ndef colorize():\r\n r = int(random.randrange(0, 255)) #SetColorRed\r\n g = int(random.randrange(0, 255)) #SetColorGreen\r\n b = int(random.randrange(0, 255)) #SetColorBlue\r\n return r, g, b\r\n\r\n#Colorful chessboard\r\nfor a in range(0,rowNumber): #DoesTheMovementToTheNextRow\r\n for b in range (0,rowNumber): #DoesTheMovementToTheNextSquare\r\n turtle.color(colorize())\r\n turtle.begin_fill() \r\n for c in range(0,4): #DoesTheMovementToDrawTheSquare\r\n turtle.forward(squareSize)\r\n turtle.right(90)\r\n turtle.bgcolor(colorize())\r\n turtle.forward(squareSize)\r\n turtle.end_fill()\r\n turtle.penup()\r\n turtle.right(90)\r\n turtle.forward(squareSize)\r\n turtle.right(90)\r\n turtle.forward(squareSize*rowNumber)\r\n turtle.right(180)\r\n turtle.pendown()\r\n turtle.bgcolor('white')\r\n\r\n#movement to the petal so it's centered above the box\r\nturtle.penup()\r\nturtle.goto(0,(100+int(squareSize*rowNumber/2)))\r\nturtle.pendown()\r\n\r\n\r\n#Flower\r\nfor d in range(0, petals):\r\n turtle.pencolor(colorize())\r\n turtle.circle(60,75)\r\n turtle.left(105)\r\n turtle.circle(60,75)\r\n turtle.left(105-(360/petals))\r\n turtle.bgcolor(colorize())\r\nturtle.bgcolor('white')\r\n\r\n\r\n#Text\r\nturtle.penup()\r\nturtle.goto(0,(-(normalizer)-75)) #Moves The Turtle To Position\r\nturtle.pencolor(\"blue\")\r\nturtle.write((\"Colorful Chessboard of \" + str(rowNumber**2) + #The Text\r\n \" Squares and Flower of \" + str(petals) + \" Petals\"),\r\n move=False, align=\"Center\", font=(\"Arial\", 16, \"normal\"))\r\n\r\n#exit on click\r\nprint(\"press click to exit\")\r\nturtle.exitonclick()\r\n","sub_path":"DDP 1/Tugas Pemrograman 1/1. Chessboard and Flower with Random Colors/TP_01_SamuelLudwigIan.py","file_name":"TP_01_SamuelLudwigIan.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"532319418","text":"#!/usr/bin/python\n# -*- coding:Utf-8 -*-\n\n# Original: https://gist.github.com/radiosilence/3946121#file-gistfile1-py-L68\n# Dependencies (for both this script and colorz.py): python-yaml python-pil\n\nimport sys\nimport colorsys\nfrom colorz import colorz\n\nWALLPAPER = sys.argv[1]\nCOLORS = '/home/kevin/.colors/WallpaperGenerated.colors'\n\ncols = \"\"\"\n! Default\n*background: #0E0E0E\n*foreground: #FFFFFF\n*cursorColor: #FFFFFF\n\n! URxvt\nURxvt.background: [85]#0E0E0E\n\n! Colors\n\n\"\"\"\n\ndef normalize(hexv, minv=128, maxv=256):\n hexv = hexv[1:]\n r, g, b = (\n int(hexv[0:2], 16) / 256.0,\n int(hexv[2:4], 16) / 256.0,\n int(hexv[4:6], 16) / 256.0,\n )\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n minv = minv / 256.0\n maxv = maxv / 256.0\n if v < minv:\n v = minv\n if v > maxv:\n v = maxv\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\n return '#{:02x}{:02x}{:02x}'.format(int(r * 256), int(g * 256), int(b * 256))\n\nif __name__ == '__main__':\n #if len(sys.argv) == 1:\n # n = 16\n #else:\n # n = int(sys.argv[1])\n # Always use 16 colors\n n = 16\n\n i = 0\n for c in colorz(WALLPAPER, n=n):\n # if i == 8:\n # i += 1\n if i == 0:\n c = normalize(c, minv=0, maxv=32)\n elif i == 8:\n c = normalize(c, minv=128, maxv=192)\n elif i < 8:\n c = normalize(c, minv=160, maxv=224)\n else:\n c = normalize(c, minv=200, maxv=256)\n cols += \"\"\"*color{}: {}\\n\"\"\".format(i, c)\n i += 1\n\n with open(COLORS, 'w') as f:\n f.write(cols)\n\n","sub_path":".scripts/CS_Generator/colorscheme.py","file_name":"colorscheme.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"187691780","text":"#!/usr/bin/env python3\nfrom bs4 import BeautifulSoup\nimport requests, sys, csv, json, re, os, urllib.request\n\n# url variables\nurl1 = \"http://ufm.edu/Portal\"\nurl2 = \"http://ufm.edu/Estudios\"\nurl3 = \"https://fce.ufm.edu/carrera/cs/\"\nurl4 = \"http://ufm.edu/Directorio\"\n\n# print if needed, gets too noisy\n# print(soup.prettify())\n\nprint(\"\")\n\nclass Soup:\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def part1(self):\n print(\"==================================================================\")\n # Make a GET request to fetch the raw HTML content\n try:\n html_content = requests.get(url1).text\n except:\n print(\"unable to get {url1}\")\n sys.exit(1)\n print(\"1. Portal\")\n soup = BeautifulSoup(html_content, \"html.parser\")\n # Print Title\n title = soup.title.string\n print(\"GET the title and print it:\", title)\n print(\"------------------------------------------------------------------\") \n # Print the Complete Address of UFM\n for data in soup.find_all(\"meta\", {\"property\": \"og:url\"}):\n address = data.get(\"content\")\n print(\"GET the Complete Address of UFM:\", address)\n print(\"------------------------------------------------------------------\")\n # Print the phone number and info email\n for data in soup.find_all(\"a\", {\"href\": \"tel:+50223387700\"}):\n phone = data.text\n for data in soup.find_all(\"a\", {\"href\": \"mailto:inf@ufm.edu\"}):\n info_email = data.string\n print(\"GET the phone number and info email:\", phone, info_email)\n print(\"------------------------------------------------------------------\")\n # Print nav menu\n print(\"GET all item that are part of the upper nav menu (id: menu-table):\")\n n = 0\n for item in soup.find_all(id = \"menu-table\"):\n nav_menu = item.text\n nav_menu = nav_menu.replace(\"\\t\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \", \")\n nav_menu = \" \".join(nav_menu.split())\n nav_menu = nav_menu.split(\", \")\n nav_menu = filter(None, nav_menu)\n for i in nav_menu:\n if n < 7:\n print(\"-\", i)\n n += 1\n print(\"------------------------------------------------------------------\")\n # Print all hrefs\n print(\"Find all properties that have href (link to somewhere):\")\n if len(soup.find_all(href = True)) < 31:\n for link in soup.find_all(href = True):\n print(\"\\n-\", link)\n else:\n logfile = \"logs/find_all_properties_that_have_href.txt\"\n f = open(logfile, \"w+\")\n for link in soup.find_all(href = True):\n f.write(\"-\" + str(link)+ \"\\n\")\n f.close()\n print(\"Output exceeds 30 lines, sending output to:\", logfile)\n print(\"------------------------------------------------------------------\")\n # Print UFMail button href\n for link in soup.find_all(\"a\", {\"id\": \"ufmail_\"}):\n UFMail = link.get(\"href\")\n print(\"GET href of \\\"UFMail\\\" button:\", UFMail)\n print(\"------------------------------------------------------------------\")\n # Print MiU button href\n for link in soup.find_all(\"a\", {\"id\": \"miu_\"}):\n MiU = link.get(\"href\")\n print(\"GET href of \\\"MiU\\\" button:\", MiU)\n print(\"------------------------------------------------------------------\")\n print(\"Get hrefs of all
:\")\n # Print all
hrefs\n if len(soup.find_all(\"img\")) < 31:\n for link in soup.find_all(\"img\"):\n print(\"-\", link.get(\"src\"))\n else:\n logfile = \"logs/get_hrefs_of_all_img.txt\"\n f = open(logfile, \"w+\")\n for link in soup.find_all(\"img\"):\n f.write(\"-\" + str(link.get(\"src\"))+ \"\\n\")\n f.close()\n print(\"Output exceeds 30 lines, sending output to:\", logfile)\n print(\"------------------------------------------------------------------\")\n # Count all \n count = 0\n for i in soup.find_all(\"a\"):\n i\n count += 1\n print(\"Count all :\", count)\n print(\"------------------------------------------------------------------\")\n # Extra point, create a csv file from all a.\n print(\"Created a csv file from all , dumping to logs/extra_as.csv\")\n # Creats lists.\n text = []\n href = []\n # Parses page for all a and appends text and hrefs to the list\n for data in soup.find_all(\"a\"):\n texts = data.text\n hrefs = data.get(\"href\")\n texts = texts.replace(\"\\t\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n texts = texts.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n texts = ' '.join(texts.split())\n text.append(texts)\n href.append(hrefs)\n\n # Creates csv file and dumps result\n n = 0\n filename = \"logs/extra_as.csv\"\n with open(filename, mode='w+') as f: \n f_writer = csv.writer(f)\n\n columnTitleRow = [\"Text\", \" href\"]\n f_writer.writerow(columnTitleRow)\n for i in href:\n f_writer.writerow([text[n], href[n]])\n n += 1\n \n if len(sys.argv) > 1:\n print(\"==================================================================\")\n else:\n pass\n return 0\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def part2(self):\n print(\"==================================================================\")\n try:\n html_content = requests.get(url2).text\n except:\n print(\"unable to get {url2}\")\n sys.exit(1)\n soup = BeautifulSoup(html_content, \"html.parser\")\n print(\"2. 
Estudios\")\n # Print topmenu items\n # Home button has no text to specify so nothing appears.\n print(\"Display all items from \\\"topmenu\\\" (8 in total): \")\n n = 0\n for item in soup.find_all(\"div\", {\"id\": \"topmenu\"}):\n top_menu = item.text\n top_menu = top_menu.replace(\"\\t\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \", \")\n top_menu = \" \".join(top_menu.split())\n top_menu = top_menu.split(\", \")\n top_menu = filter(None, top_menu)\n for i in top_menu:\n if n < 8:\n print(\"-\", i)\n n += 1\n print(\"------------------------------------------------------------------\")\n # Print all Estudios\n print(\"Display ALL \\\"Estudios\\\" (Doctorados/Maestrias/Posgrados/Licenciaturas/Baccalaureus): \")\n for item in soup.find_all(\"div\", {\"class\": \"estudios\"}):\n print(\"-\", item.text)\n print(\"------------------------------------------------------------------\")\n # Print all li leftbar items\n print(\"Display from \\\"leftbar\\\" all items (4 in total): \")\n for item in soup.find_all(\"div\", {\"class\": \"leftbar\"}):\n unwanted = item.find(\"div\", {\"class\": \"hidden-phone\"})\n unwanted.extract()\n item = str(item.text.strip())\n item = item.replace(\"\\n\", \",\")\n item = item.split(\",\")\n for i in item:\n print(\"-\", i)\n print(\"------------------------------------------------------------------\")\n # Print all social media with its links\n print(\"Get and display all available social media with its links (href) \\\"class=social pull-right\\\": \")\n for link in soup.find_all(\"div\", {\"class\": \"social pull-right\"}):\n for item in link.find_all(\"a\", {\"target\": \"_blank\"}):\n print(\"-\", item.get(\"href\"))\n print(\"------------------------------------------------------------------\")\n count = 0\n for i in soup.find_all(\"a\"):\n i\n count+=1\n print(\"Count all :\", count)\n \n if len(sys.argv) > 1:\n print(\"==================================================================\")\n else:\n pass\n return 0\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def part3(self):\n print(\"==================================================================\")\n try:\n html_content = requests.get(url3).text\n except:\n print(\"unable to get {url3}\")\n sys.exit(1)\n soup = BeautifulSoup(html_content, \"html.parser\")\n print(\"3. CS\")\n # Print Title\n title = soup.title.string\n print(\"GET the title and print it:\", title)\n print(\"------------------------------------------------------------------\")\n # Print all hrefs\n print(\"GET and display the href:\")\n if len(soup.find_all(href = True)) < 31:\n for link in soup.find_all(href = True):\n print(\"\\n-\", link.get(\"href\"))\n else:\n logfile = \"logs/get_and_display_the_href.txt\"\n f = open(logfile, \"w+\")\n for link in soup.find_all(href = True):\n f.write(\"-\" + str(link.get(\"href\"))+ \"\\n\")\n f.close()\n print(\"Output exceeds 30 lines, sending output to:\", logfile)\n print(\"------------------------------------------------------------------\")\n # Download the logo\n # Downlodea la imagen pero por alguna razón hay un problema al abrirlo. 
(Abre la imagen pero desaparece)\n for img in soup.find_all(\"img\", {\"class\": \"fl-photo-img wp-image-500 size-full\"}):\n imgUrl = img.get(\"src\")\n urllib.request.urlretrieve(imgUrl, \"images/logo.png\")\n print(\"Downloading the \\\"FACULTAD de CIENCIAS ECONOMICAS\\\" logo: \\n\", imgUrl)\n print(\"------------------------------------------------------------------\")\n print(\"GET following : \\\"title\\\", \\\"description\\\" (\\\"og\\\"): \")\n for link in soup.find_all(\"meta\", {\"property\": \"og:title\"}):\n print(\"\\n-\", link.get(\"content\"))\n for link in soup.find_all(\"meta\", {\"property\": \"og:description\"}):\n print(\"-\", link.get(\"content\"))\n print(\"------------------------------------------------------------------\")\n # Print the count of all \n count = 0\n for i in soup.find_all(\"a\"):\n i\n count += 1\n print(\"Count all :\", count)\n print(\"------------------------------------------------------------------\")\n # Print the count of all \n count = 0\n for i in soup.find_all(\"div\"):\n i\n count += 1\n print(\"Count all
:\", count)\n \n if len(sys.argv) > 1:\n print(\"==================================================================\")\n else:\n pass\n return 0\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def part4(self):\n print(\"==================================================================\")\n try:\n html_content = requests.get(url4).text\n except:\n print(\"unable to get {url4}\")\n sys.exit(1)\n soup = BeautifulSoup(html_content, \"html.parser\")\n print(\"4. Directorio\")\n # Count of emails with a vowel\n count = 0 \n for table in soup.find_all(\"table\", {\"class\": [\"tabla ancho100\", \"tabla ancho100 col3\"]}):\n match = re.findall(r\"[\\w\\.-]+@[\\w\\.-]+\", table.text)\n match = match + match\n match.sort()\n # Count of emails that start with with a vowel\n for table in soup.find_all(\"table\", {\"class\": [\"tabla ancho100\", \"tabla ancho100 col3\"]}):\n for word in re.findall(r\"[\\w\\.-]+@[\\w\\.-]+\", table.text):\n if word[0] in [\"a\",\"e\",\"i\",\"o\",\"u\",\"A\",\"E\",\"I\",\"O\",\"U\"]:\n count += 1\n logfile = \"logs/4directorio_emails.txt\"\n f = open(logfile, \"w+\")\n f.write(str(match))\n f.close()\n print(\"Sort emails alphabetically, sending output to\", logfile)\n print(\"------------------------------------------------------------------\")\n print(\"Count all emails that start with a vowel:\", count)\n print(\"------------------------------------------------------------------\")\n # Group in a JSON rows that have same address and dump into logs\n # Had to replace á, é, í, ó, ú because json wasn't accepting the values and would place the hexcode. \n print(\"Grouped all rows with Same Address, dumping to logs/4directorio_address.json\")\n # Parse both tables.\n table1 = soup.find(\"table\", {\"class\": \"tabla ancho100\"})\n table2 = soup.find_all(\"table\", {\"class\": \"tabla ancho100\"})[1]\n # Create empty lists \n location = []\n page = []\n # Checks data in table1\n for row in table1.find_all(\"tr\"):\n cells = row.find_all(\"td\")\n if len(cells) == 5: # Not really necessary\n var2 = cells[4].find(text = True).replace(\"\\n\", \"\").replace(\",\", \"\")\n var1 = cells[0].text\n var2 = \" \".join(var2.split())\n var1 = \" \".join(var1.split())\n # Replaces tildes to delete hex codes later on.\n var2 = var2.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n var1 = var1.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n # Adds results to lists\n location.append(var2)\n page.append(var1)\n # Checks data in table1\n for row in table2.find_all(\"tr\"):\n cells = row.find_all(\"td\")\n if len(cells) == 5: # Not really necessary\n var3 = cells[0].text\n var4 = cells[4].find(text = True).replace(\"\\n\", \"\").replace(\",\", \"\")\n var3 = \" \".join(var3.split())\n var4 = \" \".join(var4.split())\n # Replaces tildes to delete hex codes later on.\n var3 = var3.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n var4 = var4.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n # Adds results to lists\n page.append(var3)\n location.append(var4)\n dictionary = dict(zip(page, 
location))\n # Group the page names by the address they share (building the dict directly kept overwriting values)\n ordered = {}\n for key, value in dictionary.items():\n if value not in ordered:\n ordered[value] = [key]\n else:\n ordered[value].append(key)\n\n # Creates json and dumps result.\n filename = \"logs/4directorio_address.json\"\n with open(filename, \"w+\") as f:\n json.dump(ordered, f, indent = 4)\n print(\"------------------------------------------------------------------\")\n print(\"Correlated Faculty Dean and Directors into JSON, dumping to logs/4directorio_deans.json\")\n # Parse the second 3-column table.\n table3 = soup.find_all(\"table\", {\"class\": \"tabla ancho100 col3\"})[1]\n # Create empty lists \n dean = []\n faculty = []\n email = []\n phone = []\n # Checks data in table3\n for row in table3.find_all(\"tr\"):\n cells = row.find_all(\"td\")\n if len(cells) == 3: # Not really necessary\n var5 = cells[0].text\n var6 = cells[1].find(text = True).replace(\"\\n\", \"\")\n var7 = cells[2].text\n var5 = \" \".join(var5.split())\n var6 = \" \".join(var6.split())\n var7 = \" \".join(var7.split())\n # Replaces tildes to delete hex codes later on.\n var5 = var5.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n var6 = var6.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\").replace(\", decano\", \"\")\n var7 = var7.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n var6 = \"Dean/Director: \" + var6\n var7 = \"E-mail: \" + var7\n if \"Facultad\" in var5:\n faculty.append(var5)\n dean.append(var6)\n email.append(var7)\n\n # Compares the faculty with rows in the first table; when a match is found, gets the phone number for that row.\n faculty_var = str(faculty).replace(\"Facultad de\", \"\").replace(\"[\", \"\").replace(\"\\'\", \"\").replace(\"]\", \"\")\n faculty_var = ' '.join(faculty_var.split())\n faculty_var = list(faculty_var.split(\", \"))\n table1 = soup.find(\"table\", {\"class\": \"tabla ancho100\"})\n for i in faculty_var:\n for row in table1.find_all(\"tr\"):\n cells = row.find_all(\"td\")\n if len(cells) == 5: # Not really necessary\n var9 = cells[2].text\n var8 = cells[0].text\n var9 = \" \".join(var9.split())\n var8 = \" \".join(var8.split())\n # Replaces tildes to delete hex codes later on.\n var9 = var9.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n var8 = var8.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n # Adds the phone number when the faculty name matches\n if i.replace(\"\\n\", \"\") == var8:\n var9 = \"Phone Number: \" + var9\n phone.append(var9)\n dictionary2 = dict((z[0], list(z[1:])) for z in zip(faculty, dean, email, phone))\n \n # Creates json and dumps result.\n filename = \"logs/4directorio_deans.json\"\n with open(filename, mode = \"w+\") as f:\n json.dump(dictionary2, f, indent = 4) \n print(\"------------------------------------------------------------------\")\n print(\"Generated CSV file with directories of all 3 column tables, dumping to logs/4directorio_3column_tables.csv\")\n 
# Parse the three 3-column tables.\n tables = soup.find_all(\"table\", {\"class\": \"tabla ancho100 col3\"})[:3]\n # Create empty lists \n entity = []\n fullname = []\n emails = []\n # Checks data in every table\n for table in tables:\n for row in table.find_all(\"tr\"):\n cells = row.find_all(\"td\")\n if len(cells) == 3: # Not really necessary\n var10 = cells[0].text\n var11 = cells[1].find(text = True).replace(\"\\n\", \"\").replace(\",\", \"\")\n var12 = cells[2].text\n var10 = \" \".join(var10.split())\n var11 = \" \".join(var11.split())\n var12 = \" \".join(var12.split())\n if var11 != \"\":\n # Replaces tildes to delete hex codes later on.\n var10 = var10.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n var11 = var11.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n var12 = var12.replace(\"á\", \"a\").replace(\"é\", \"e\").replace(\"í\", \"i\").replace(\"ó\", \"o\").replace(\"ú\", \"u\").replace(\"ñ\", \"n\")\n # Adds results to lists\n entity.append(var10)\n fullname.append(var11)\n emails.append(var12)\n\n # Creates csv file and dumps result\n filename = \"logs/4directorio_3column_tables.csv\"\n with open(filename, mode='w+') as f: \n f_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n columnTitleRow = [\"Entity\", \" Fullname\", \" 
Email\"]\n f_writer.writerow(columnTitleRow)\n for i in entity:\n f_writer.writerow([entity[n], fullname[n], emails[n]])\n n += 1\n print(\"==================================================================\")\n return 0\n\n# Checks the command line and runs program according to input.\nrun = Soup()\nif len(sys.argv) > 1:\n if sys.argv[1] == \"1\":\n run.part1()\n elif sys.argv[1] == \"2\":\n run.part2()\n elif sys.argv[1] == \"3\":\n run.part3()\n elif sys.argv[1] == \"4\":\n run.part4()\n else:\n print(\"Error in command line, please specify which part to run with 1 to 4 or leave blank to run all parts\")\nelse:\n run.part1()\n run.part2()\n run.part3()\n run.part4()\n","sub_path":"soup.py","file_name":"soup.py","file_ext":"py","file_size_in_byte":24482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"548066909","text":"import unittest\nimport os\n\nfrom pds_doi_service.core.actions.draft import DOICoreActionDraft\n\n\nfrom pds_doi_service.core.util.general_util import get_logger\n\nlogger = get_logger(__name__)\n\ndef create_temporary_output_file(doi_label,filename):\n # Save doi_label so it can be compared to.\n temporary_file_name = filename\n temporary_file_ptr = open(temporary_file_name,\"w+\")\n temporary_file_ptr.write(doi_label + \"\\n\")\n temporary_file_ptr.close()\n return temporary_file_name\n\nclass MyTestCase(unittest.TestCase):\n db_name = 'doi_temp.db'\n # Because validation has been added to each action, the force=True is required as the command line is not parsed for unit test.\n\n @classmethod\n def setUp(self):\n # This setUp() function is called for every test.\n self._action = DOICoreActionDraft(db_name=self.db_name)\n logger.info(f\"Instantiate DOICoreActionDraft with database file {self.db_name}\")\n # Create output directory if one does not already exist.\n self._temporary_output_dir = './tests/data'\n os.makedirs(self._temporary_output_dir, exist_ok=True)\n\n @classmethod\n def tearDown(self):\n # This tearDown() function is called at end of every test.\n if os.path.isfile(self.db_name):\n os.remove(self.db_name)\n logger.info(f\"Removed test artifact database file {self.db_name}\")\n else:\n logger.info(f\"File not exist, test artifact database file {self.db_name}\")\n\n\n def test_local_dir_one_file(self):\n logger.info(\"test local dir with one file\")\n osti_doi = self._action.run(input='input/draft_dir_one_file',\n node='img',\n submitter='my_user@my_node.gov',force=True)\n logger.info(osti_doi)\n\n def test_local_dir_two_files(self):\n logger.info(\"test local dir with two files\")\n osti_doi = self._action.run(input='input/draft_dir_two_files',\n node='img',\n submitter='my_user@my_node.gov',force=True)\n logger.info(osti_doi)\n\n def test_local_bundle(self):\n logger.info(\"test local bundle\")\n osti_doi = self._action.run(input='input/bundle_in_with_contributors.xml',\n node='img',\n submitter='my_user@my_node.gov',force=True)\n logger.info(osti_doi)\n\n def test_remote_bundle(self):\n logger.info(\"test remote bundle\")\n osti_doi = self._action.run(\n input='https://pds-imaging.jpl.nasa.gov/data/nsyt/insight_cameras/bundle.xml',\n node='img',\n submitter='my_user@my_node.gov',force=True)\n logger.info(osti_doi)\n #create_temporary_output_file(osti_doi, os.path.join(self._temporary_output_dir,'valid_bundle_doi.xml'))\n\n def test_remote_collection(self):\n logger.info(\"test remote collection\")\n osti_doi = self._action.run(\n input='https://pds-imaging.jpl.nasa.gov/data/nsyt/insight_cameras/data/collection_data.xml',\n node='img', submitter='my_user@my_node.gov',force=True)\n logger.info(osti_doi)\n #create_temporary_output_file(osti_doi, os.path.join(self._temporary_output_dir,'valid_datacoll_doi.xml'))\n\n def test_remote_browse_collection(self):\n logger.info(\"test remote browse collection\")\n osti_doi = self._action.run(\n input='https://pds-imaging.jpl.nasa.gov/data/nsyt/insight_cameras/browse/collection_browse.xml',\n node='img', submitter='my_user@my_node.gov',force=True)\n logger.info(osti_doi)\n #create_temporary_output_file(osti_doi, os.path.join(self._temporary_output_dir,'valid_browsecoll_doi.xml'))\n\n def test_remote_calibration_collection(self):\n logger.info(\"test remote calibration collection\")\n osti_doi = self._action.run(\n 
input='https://pds-imaging.jpl.nasa.gov/data/nsyt/insight_cameras/calibration/collection_calibration.xml',\n node='img', submitter='my_user@my_node.gov',force=True)\n logger.info(osti_doi)\n #create_temporary_output_file(osti_doi, os.path.join(self._temporary_output_dir,'valid_calibcoll_doi.xml'))\n\n def test_remote_document_collection(self):\n logger.info(\"test remote document collection\")\n osti_doi = self._action.run(\n input='https://pds-imaging.jpl.nasa.gov/data/nsyt/insight_cameras/document/collection_document.xml',\n node='img', submitter='my_user@my_node.gov',force=True)\n logger.info(osti_doi)\n #create_temporary_output_file(osti_doi, os.path.join(self._temporary_output_dir,'valid_docucoll_doi.xml'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"pds_doi_service/core/actions/test/draft_test.py","file_name":"draft_test.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"402544183","text":"import threading\nimport time\nimport logging\nimport random\nimport queue\n\nlogging.basicConfig(level=logging.DEBUG,\n format='(%(threadName)-9s) %(message)s',)\n\nBUF_SIZE = 10\nq = queue.Queue(BUF_SIZE)\n\nclass ProducerThread(threading.Thread):\n def __init__(self, name=None):\n super(ProducerThread,self).__init__()\n self.name = name\n\n def run(self):\n while True:\n if not q.full():\n item = {\n \"number\": random.randint(1,10)\n }\n q.put(item)\n logging.debug('Putting ' + str(item) \n + ' : ' + str(q.qsize()) + ' items in queue')\n time.sleep(random.random())\n return\n\nclass ConsumerThread(threading.Thread):\n def __init__(self, name=None):\n super(ConsumerThread,self).__init__()\n self.name = name\n return\n\n def run(self):\n while True:\n if not q.empty():\n item = q.get()\n logging.debug('Getting ' + str(item) \n + ' : ' + str(q.qsize()) + ' items in queue')\n time.sleep(random.random())\n return\n\nif __name__ == '__main__':\n \n p1 = ProducerThread(name='producer1')\n p2 = ProducerThread(name='producer2')\n c1 = ConsumerThread(name='consumer1')\n c2 = ConsumerThread(name='consumer2')\n\n p1.start()\n p2.start()\n time.sleep(2)\n c1.start()\n c2.start()\n time.sleep(2)\n","sub_path":"python-multithreading/producer_consumer_with_queue.py","file_name":"producer_consumer_with_queue.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"113248641","text":"import tensorflow as tf\nfrom data_reader import DataReader\nfrom loss import nt_xent\nfrom utils import LinearWarmUpCosineDecay, add_to_summary, determine_iterations_per_epoch\n\n\n@tf.function\ndef train_step(model, config, image1, image2):\n \"\"\"Perform one training step using normalized cross entropy loss.\n\n Args:\n model: a tensorflow keras model\n config: model configurations\n image1: First augmented image batch [B, H, W, CH]\n image2: Second augmented image batch [B, H, W, CH]\n Returns:\n loss: sum of training loss and regularization loss\n gradients: gradients of model weights\n \"\"\"\n\n with tf.GradientTape() as tape:\n z1 = model(image1, training=True)\n z2 = model(image2, training=True)\n z1 = tf.math.l2_normalize(z1, axis=1)\n z2 = tf.math.l2_normalize(z2, axis=1)\n loss = nt_xent(z1, z2, config.batch_size, config.temperature, config.zdim)\n reg_loss = tf.add_n(model.losses) if model.losses else 0\n loss = loss + reg_loss\n\n gradients = tape.gradient(loss, model.trainable_variables)\n\n return loss, gradients\n\ndef pretrain_cifar10(model, config):\n \"\"\"Pretrains the model based on config settings\n\n This function first creates an instance of the DataReader class.\n Next a projection head is added to the base model. Weights are restored\n if previously saved and finally the model is trained over the entire \n dataset for the pre-specified number of epochs.\n\n Args:\n model: a tensorflow keras model\n config: model configurations\n \"\"\"\n\n resnet_output = model.output\n layer1 = tf.keras.layers.GlobalAveragePooling2D(name='GAP')(resnet_output)\n layer2 = tf.keras.layers.Dense(units=config.zdim*2, activation='relu')(layer1)\n model_output = tf.keras.layers.Dense(units=config.zdim)(layer2)\n model = tf.keras.Model(model.input, model_output)\n \n x_train = load_cifar10(config)\n\n iterations_per_epoch = x_train.shape[0]//config.batch_size\n total_iterations = iterations_per_epoch*config.num_epochs\n\n learning_rate = LinearWarmUpCosineDecay(total_iterations, config.learning_rate)\n optimizer = tf.keras.optimizers.SGD(learning_rate=config.learning_rate, momentum=0.9)\n\n checkpoint = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optimizer, net=model)\n manager = tf.train.CheckpointManager(checkpoint, config.pretrain_save_path, max_to_keep=10)\n\n # restore weights if they exist\n if manager.latest_checkpoint:\n checkpoint.restore(manager.latest_checkpoint)\n print('Restoring weights from {}'.format(manager.latest_checkpoint))\n else:\n print('Training model from scratch')\n\n summary_writer = tf.summary.create_file_writer(config.pretrain_save_path)\n\n epoch_loss = [] \n current_epoch = tf.cast(tf.floor(optimizer.iterations/iterations_per_epoch), tf.int64)\n data = DataReader(config)\n batch = data.read_cifar10(x_train, current_epoch, num_epochs=config.num_epochs)\n for (image1, image2), epoch in batch:\n loss, grads = train_step(model, config, image1, image2)\n epoch_loss.append(loss)\n\n optimizer.__setattr__('lr', learning_rate(optimizer.iterations))\n optimizer.apply_gradients(zip(grads, model.trainable_variables)) \n\n checkpoint.step.assign_add(1)\n if checkpoint.step.numpy() % 100 == 0:\n add_to_summary(summary_writer, loss, optimizer.__getattribute__('lr'), image1, image2, checkpoint.step.numpy())\n summary_writer.flush()\n\n if tf.reduce_all(tf.equal(epoch, current_epoch)):\n print(\"Loss after epoch {}: {}\".format(current_epoch, sum(epoch_loss)/len(epoch_loss)))\n epoch_loss = []\n current_epoch += 1\n\n if 
current_epoch % 100 == 0:\n save_path = manager.save()\n print(\"Saved checkpoint for epoch {}: {}\".format(current_epoch, save_path))\n\n\ndef load_cifar10(config):\n \"\"\"Loads training data for cifar10.\n Args: \n config: model configurations\n Returns:\n x_train: training images\n \"\"\"\n\n config.input_size = [32, 32]\n config.crop_size = [32, 32]\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n\n return x_train\n","sub_path":"simclr/train_cifar10.py","file_name":"train_cifar10.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"78924259","text":"# PyJobCrawler - finds python job offers on trojmiasto.pl site\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef main_crawl(max_pages):\r\n \"\"\" Crawls through set number of job pages, returns dictionary of links with titles\"\"\"\r\n page = 0\r\n url_dict = {}\r\n\r\n while page <= max_pages:\r\n url = \"http://ogloszenia.trojmiasto.pl/praca-zatrudnie/slb,4,o0,0.html?strona=\" + str(page)\r\n source_code = requests.get(url)\r\n plain_text = source_code.text\r\n soup = BeautifulSoup(plain_text, \"html5lib\")\r\n data = soup.findAll('div',attrs={'class': \"ogl-head\"})\r\n\r\n for div in data:\r\n links = div.findAll('a')\r\n for link in links:\r\n url = link.get('href')\r\n title = link.string\r\n if get_data(url):\r\n url_dict.update({title: url})\r\n page += 1\r\n\r\n return url_dict\r\n\r\ndef get_data(item_url):\r\n \"\"\" Gets data from subpages, and checks if there is python involved\"\"\"\r\n key_words = [\"Python\", \"python\"]\r\n source_code = requests.get(item_url)\r\n plain_text = source_code.text\r\n soup = BeautifulSoup(plain_text, \"html5lib\")\r\n\r\n for word in key_words:\r\n if word in str(soup):\r\n return True\r\n else:\r\n return False\r\n\r\ndef save_to_txt(dict):\r\n \"\"\" Saves results to txt file\"\"\"\r\n file = open(\"links.txt\", 'w')\r\n\r\n for key, value in dict.items():\r\n file.write(key + \"\\n\" + value + \"\\n\\n\\n\")\r\n\r\n file.close()\r\n\r\n\r\n# *** main ***\r\n\r\npage_num = 2 # number of pages to crawl through\r\nsave_to_txt(main_crawl(page_num))\r\n","sub_path":"pyjobcrawler.py","file_name":"pyjobcrawler.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"227081895","text":"import re\nimport logging\nfrom datetime import timedelta\n\nfrom django.contrib import messages\nfrom django.contrib.auth import login\nfrom django.http import HttpResponseNotFound\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.http import is_safe_url\nfrom django.views.generic import View, FormView, TemplateView\n\nfrom magicauth.forms import EmailForm\nfrom magicauth.models import MagicToken\nfrom magicauth import settings as magicauth_settings\n\nlogger = logging.getLogger()\n\nclass LoginView(FormView):\n \"\"\"\n The login page. The user enters their email in the form to get a link by email.\n \"\"\"\n\n form_class = EmailForm\n success_url = reverse_lazy(\"magicauth-email-sent\")\n template_name = magicauth_settings.LOGIN_VIEW_TEMPLATE\n\n def get(self, request, *args, **kwargs):\n next_view = self.request.GET.get(\n \"next\", f\"/{magicauth_settings.LOGGED_IN_REDIRECT_URL_NAME}/\"\n )\n if request.user.is_authenticated:\n return redirect(next_view)\n\n return super(LoginView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(LoginView, self).get_context_data(**kwargs)\n context[\n \"LOGGED_IN_REDIRECT_URL_NAME\"\n ] = magicauth_settings.LOGGED_IN_REDIRECT_URL_NAME\n context[\"LOGOUT_URL_NAME\"] = magicauth_settings.LOGOUT_URL_NAME\n return context\n\n def form_valid(self, form, *args, **kwargs):\n next_view = self.request.GET.get(\n \"next\",\n f\"/{magicauth_settings.LOGGED_IN_REDIRECT_URL_NAME}/\"\n )\n current_site = self.request.site\n form.send_email(current_site, next_view)\n return super().form_valid(form)\n\n\nclass EmailSentView(TemplateView):\n \"\"\"\n View shown to confirm the email has been sent.\n \"\"\"\n\n template_name = magicauth_settings.EMAIL_SENT_VIEW_TEMPLATE\n\n\nclass ValidateTokenView(View):\n \"\"\"\n The link sent by email goes to this view.\n It validates the token passed in querystring,\n and either logs in or shows a form to make a new token.\n \"\"\"\n\n @staticmethod\n def get_valid_token(key):\n duration = magicauth_settings.TOKEN_DURATION_SECONDS\n try:\n token = MagicToken.objects.get(key=key)\n except MagicToken.DoesNotExist:\n return None\n except MagicToken.MultipleObjectsReturned:\n return None\n\n if token.created < timezone.now() - timedelta(seconds=duration):\n token.delete()\n return None\n return token\n\n def get(self, request, *args, **kwargs):\n full_path = request.get_full_path()\n\n rule_for_redirect = re.compile(\"(.*next=)(.*)\")\n next_view = rule_for_redirect.match(full_path)\n redirect_default = reverse_lazy(magicauth_settings.LOGGED_IN_REDIRECT_URL_NAME)\n url = next_view.group(2) if next_view else redirect_default\n\n # the following `is_safe_url` will be deprecated in django 4 and replaced by url_has_allowed_host_and_scheme\n if not is_safe_url(url, allowed_hosts={request.get_host()}, require_https=True):\n # We are not logging the unsafe URL to prevent code injections in logs\n logger.warning(\"[MagicAuth] an unsafe URL was used through a login link\")\n return HttpResponseNotFound()\n\n if request.user.is_authenticated:\n return redirect(url)\n token_key = kwargs.get(\"key\")\n token = self.get_valid_token(token_key)\n if not token:\n messages.warning(\n self.request,\n \"Ce lien de connexion ne fonctionne plus. 
\"\n \"Pour en recevoir un nouveau, nous vous invitons à renseigner \"\n \"votre email ci-dessous puis à cliquer sur valider.\",\n )\n return redirect(\"magicauth-login\")\n login(self.request, token.user)\n MagicToken.objects.filter(\n user=token.user\n ).delete() # Remove them all for this user\n return redirect(url)\n","sub_path":"magicauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"464219477","text":"#!/usr/bin/env python3\n\n# Author: Logan Tillman\n# NetID: Ltillma4\n# Hw8\n\nfrom numpy import round, array, polyfit, polyder, polyval\n\n# Problem 5.1.9\n\n# For this problem, I'm computing an approximation using both a second-order accurate \n# central FDA method and Richardson Extrapolation.\n# \n# Name: Logan Tillman \n# Hw8\n\nprint(\"Problem 5.1.9\\n=============\")\n#\n# This is an example of a dictionary with key:value pairs,\n# data9[0.1]=0.078348 and data9[0.3]=0.192916, for example.\n#\ndata9 = { 0.0: 0.0, 0.1: 0.078348, 0.2: 0.138910, 0.3: 0.192916, 0.4: 0.244981 }\n#\n# We want to approximate f'(0.2) using two different values of h (=0.2, 0.1)\n#\n# First use central difference FDA, i.e., 2hf'(x) = -f(x-h) + f(x+h), with h=0.2\n#\nh1 = 0.2\nx = 0.2\ncentralApprox1 = (-data9[x-h1] + data9[x+h1]) / (2*h1)\nprint(\"Central FDA Approximation (h = 0.2): %f\" % centralApprox1)\n#\n# Now, half the stepsize and set h=0.1\n# and use the function all round(x+h2, decimals=1) so that you can\n# insure the proper key from the data9 dictionary will be selected.\n#\nh2 = h1 / 2.0\ncentralApprox2 = (-data9[round(x-h2, decimals=1)] + data9[round(x+h2, decimals=1)]) / (2*h2)\n#\n# Now, apply Richardson Extrapolation to improve the approximation of f'(0.2)\n# using centralApprox1 and centralApprox2\np = 2.0\nrichardsonExtrap = (2**p * centralApprox2 - centralApprox1) / (2**p - 1)\nprint(\"Richardson Extrapolation (h2 = h1 / 2, p = 2): %f\" % richardsonExtrap)\n#\n# Problem 5.1.11\n\n# For this problem I'm using polynomial interpolation to compute the derivatives at a specific point, x\n# I'm using a polynomial of the third degree to approximate the function\n# \n\nprint(\"\\nProblem 5.1.11\\n==============\")\n#\n# Here are the interpolation data points...\n#\nxData = array([-2.2, -0.3, 0.8, 1.9])\nyData = array([15.18, 10.962, 1.92, -2.04])\n\n# Using polyfit and polyder from Numpy...\n\n# First compute the coefficients for a cubic interpolating\n# polynomial using polyfit\n\np = polyfit(xData, yData, 3)\n#\n# Then, use those coefficients to construct the first (d1) and\n# second (d2) derivative functions via polyder...\n#\nd1 = polyder(p, 1) \nd2 = polyder(p, 2)\n#\n# Here are the coeffcients of the actual polynomial, whose derivatives\n# we are approximating.\n#\npActual = array([1, -0.3, -8.56, 8.448])\n\n# Use those coefficients to construct the first (d1Actual) and\n# second (d2Acutal) derivative functions via polyder...\n\nd1Actual = polyder(pActual, 1)\nd2Actual = polyder(pActual, 2)\n\n# Now, evaluate the first derivative of your cubic interpolating polynomial\n# at x=0, and then do the same for the first derivative of the actual\n# polynomial whose coefficients are given in pActual.\n\nfd = polyval(d1, 0) \nfdActual = polyval(d1Actual, 0)\n\nprint(\"Interpolated f'(0): %f\" % fd)\nprint(\"Actual f'(0): %f\" % fdActual)\nprint(\"Error: %f\\n\" % (fdActual - fd))\n\n# Now, evaluate the second derivative of your cubic interpolating polynomial\n# at x=0, and then do the same for the second derivative of the actual\n# polynomial whose coefficients are given in pActual.\n\nsd = polyval(d2, 0)\nsdActual = polyval(d2Actual, 0)\n\nprint(\"Interpolated f''(0): %f\" % sd)\nprint(\"Actual f''(0): %f\" % sdActual)\nprint(\"Error: %f\" % (sdActual - sd))\n#\n# Outputs for verification:\n#\n# Problem 5.1.9\n# =============\n# Central FDA Approximation (h = 0.2): 0.612452\n# Richardson Extrapolation (h2 = h1 / 2, p = 2): 0.559636\n#\n# Problem 5.1.11\n# 
==============\n# Interpolated f'(0): -8.560000\n# Actual f'(0): -8.560000\n# Error: -0.000000\n#\n# Interpolated f''(0): -0.600000\n# Actual f''(0): -0.600000\n# Error: -0.000000","sub_path":"hw8/ltillma4.hw8.py","file_name":"ltillma4.hw8.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"308916768","text":"from configparser import ConfigParser\nimport logging\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer\nimport torch\nfrom torch.nn import functional as F\n\nfrom decoder import top_k_top_p_filtering\nfrom discord_bot import get_prescripted_lines\nfrom model import load_model, download_model_folder\n\n# Enable logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nhandler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\nhandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\nlogger.addHandler(handler)\n\n# Load GPT-2 tokenizer and model\n# tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n# model = GPT2LMHeadModel.from_pretrained('gpt2')\n\nconfig_parser = ConfigParser(allow_no_value=True)\nwith open(\"chatbot.cfg\") as f:\n config_parser.read_file(f)\ntarget_folder_name = download_model_folder(config_parser)\nmodel, tokenizer = load_model(target_folder_name, config_parser)\n\n\nglobal static_history\nstatic_history = get_prescripted_lines(\"./constant_thoughts.txt\")\nhistory = \"\"\nfor message in static_history:\n history += message \n# Tokenize input phrase\nhistory += f'Starting to think that sometimes it is my testing input that could be a problem. Same with the numbers or the long winded sentences. This is a conversational model, so feeding her say a stacktrace of course would be a problem for her. I sleep in a bed that is poorly '\ncontext_ids = tokenizer.encode(history, return_tensors='pt')\n# num_samples = config_parser.getint('decoder', 'num_samples')\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ncontext_tensor = torch.tensor(context_ids, dtype=torch.long, device=device)\ngenerated = context_tensor\nwith torch.no_grad():\n while True:\n inputs = {'input_ids': context_ids}\n # Get logits from last layer\n last_layer_logits = model(**inputs)[0][:, -1, :] / .6474\n\n # Keep top 30 logits at max; stop if cumulative probability >= 1.0.\n top_logits = top_k_top_p_filtering(last_layer_logits, top_k=40, top_p=0.0)\n\n next_token = torch.argmax(top_logits, dim=-1).unsqueeze(-1)\n logger.debug(\"argmax_next_token: \" + str(next_token))\n \n # Softmax the logits into probabilities\n probabilities = F.softmax(top_logits, dim=-1)\n\n # Generate next token\n generated_next_token = torch.multinomial(probabilities, num_samples=3)\n logger.debug(\"mult_next_token: \" + str(generated_next_token))\n\n generated = torch.cat([generated, generated_next_token], dim=-1)\n\n # generated = torch.cat((generated, next_token), dim=1)\n \n # logger.debug(\"length: \" + str(len(context_ids)))\n # logger.debug(\"original_tokening: \" + str(generated[:, len(context_ids):]))\n # logger.debug(\"meow_tokening: \" + str(generated[:len(context_ids):]))\n # logger.debug(\"alt_meow_tokening: \" + str(generated[:len(context_ids),:]))\n # logger.debug(\"alt_tokening: \" + str(generated[:, :len(context_ids)]))\n # if (generated[:, len(context_ids):] == tokenizer.eos_token_id).any(dim=1).all():\n # # EOS token id found in each sample\n # logger.debug(\"EOS token id found in each sample\")\n # break\n if generated.shape[1] - len(context_ids) >= 25:\n # Maximum length reached\n logger.debug(\"Maximum length reached\")\n break\n# # Get logits from last layer\n# last_layer_logits = model(inputs)[0][:, -1, :]\n\n# # Keep top 30 logits at max; stop if cumulative probability >= 1.0.\n# top_logits = top_k_top_p_filtering(last_layer_logits, top_k=30, top_p=0.0)\n\n# # 
Softmax the logits into probabilities\n# probabilities = F.softmax(top_logits, dim=-1)\n\n# # Generate next token\n# generated_next_token = torch.multinomial(probabilities, num_samples=1)\n# generated = torch.cat([inputs, generated_next_token], dim=-1)\n\n# Get result\nsamples = generated[:, len(context_ids):].tolist()\n# result_string = tokenizer.decode(samples[0])\ntexts = []\nfor sample in samples:\n text = tokenizer.decode(sample)\n # text = text[: text.find(tokenizer.eos_token)]\n texts.append(text)\n\n# Print string\nprint(texts)","sub_path":"gpt2bot/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"375743575","text":"import cgi\nfrom .Student import *\n\nclass Starosta(Student):\n def __init__(self,q):\n super().__init__(q)\n self.age=age=\"\"\n self.salary=salary=\"\"\n self.q=self.q\n \n def Edit(self,number,q,db):\n self.name = q['name'].value\n self.group = q['group'].value\n self.mark = q['mark'].value\n self.age=q['age'].value\n self.salary=q['salary'].value\n self.tip=q['tip'].value\n if number==0:\n db.execute('insert into university values(NULL,?, ?,?, ?, ?,?)', (self.tip,self.name, self.group, self.mark,self.age,self.salary))\n else:\n db.execute('update university set tip=?, name=?, p_group=?, mark=?, age=?, salary=? where iid=?',(self.tip,self.name, self.group, self.mark,self.age,self.salary,number))\n \n def DBLoad(self, r):\n self.name = r['name']\n self.group = r['p_group']\n self.mark = r['mark']\n self.age = r['age']\n self.salary = r['salary']\n self.tip = r['tip']\n self.number=r['iid']\n\n def change(self,number):\n print(\"\"\"\n
\n
Назад\n \"\"\".format(self.q['student'].value, self.q.getvalue('i'), self.name,self.group, self.mark, self.age, self.salary, number))\n\n def show(self, i):\n print(\"\"\"\nИмя: {2}
\nГруппа: {3}
\nСредний балл: {4}
\nВозраст: {5}
\nЗарплата: {6}
\nТип: {7}
\nНомер в базе: {8}
\n
Редактировать |
Удалить\n
\n\"\"\".format(self.q['student'].value, i, self.name, self.group, self.mark, self.age, self.salary, self.tip, self.number))\n\n\n","sub_path":"cgi-bin/st22/Starosta.py","file_name":"Starosta.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"247241753","text":"# APP to display a graph of all the people who have a wikipedia page, went to trinity college and aren't irish citizens\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport requests\n\n\ndef get_result(): \n\turl = 'https://query.wikidata.org/sparql'\n\tquery = \"\"\"\n\tSELECT ?item ?itemLabel ?country ?countryLabel\n\tWHERE{\n\t\t ?item wdt:P31 wd:Q5.\n\t\t ?item wdt:P69 wd:Q258464 .\n\t\t ?item wdt:P27 ?country. \n\t\t MINUS { \n\t\t\t?item wdt:P27 wd:Q27. \n\t\t }\n\t\t MINUS { \n\t\t\t?item wdt:P27 wd:Q22890.\n\t\t }\n\t\t MINUS { \n\t\t\t?item wdt:P27 wd:Q1140152.\n\t\t }\n\t\t MINUS { \n\t\t\t?item wdt:P27 wd:Q57695350.\n\t\t }\n\t\t MINUS { \n\t\t\t?item wdt:P27 wd:Q2712121.\n\t\t }\n\t\t MINUS { \n\t\t\t?item wdt:P27 wd:Q215530.\n\t\t }\n\t\tMINUS { \n\t\t\t?item wdt:P27 wd:Q174193.\n\t\t }\n\t\t \n\t\tMINUS { \n\t\t\t?item wdt:P27 wd:Q31747.\n\t\t }\n\t\t SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n\t \n\t}\n\t\"\"\"\n\tr = requests.get(url, params = {'format': 'json', 'query': query})\n\tdata = r.json()\n\tresults = data[\"results\"]\n\tcountries = []\n\tfor value in results.values():\n\t\tfor result in value:\n\t\t\tcountries.append(result[\"countryLabel\"][\"value\"])\n\treturn countries \n\ndef generate_graph(countries): \n\tcountry_labels = list(set(countries))\n\tcountry_count = {}\n\tfor label in country_labels:\n\t\tcountry_count[label] = countries.count(label)\n\n\tcountry_count = sorted(country_count.items(), key=lambda x: x[1])\n\tx = [x[0] for x in country_count]\n\ty = [x[1] for x in country_count]\n\n\tfig, ax = plt.subplots()\n\twidth = 0.75 # the width of the bars\n\tind = np.arange(len(y)) # the x locations for the groups\n\tax.barh(ind, y, width, color=\"blue\")\n\tax.set_yticks(ind+width/2)\n\tax.set_yticklabels(x, minor=False, fontsize=7)\n\tfor i, v in enumerate(y):\n\t\tax.text(v + 0.5, i - 0.25, str(v), color='blue', fontweight='bold', fontsize=8)\n\tplt.title('Non Irish citizens who went to Trinity College')\n\tplt.xlabel('Number of students')\n\t# plt.savefig('bar_chart.png',)\n\tplt.show()\n\t\n\nif __name__ == \"__main__\":\n\tgraph_data = get_result() \n\tgenerate_graph(graph_data) \n","sub_path":"trinity_students.py","file_name":"trinity_students.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"150187293","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nfrom pathlib import Path\n\ndef emit_version():\n print(1)\n\n\ndef emit_name():\n print(\"markdown_tool\")\n\n\ndef emit_applicable():\n print(\"true\")\n\ndef run(path):\n pathlist = Path(path).glob('**/*.txt')\n\n tool_notes = []\n for filename in pathlist:\n tool_notes.extend(process_file(filename))\n\n print(json.dumps(tool_notes))\n\ndef process_file(filename):\n file_display = str(filename)\n tool_notes = []\n with open(filename, 'r') as f:\n current_line = 0\n for line in f:\n current_line += 1\n if(\"markdown comment\" in line):\n tool_notes.append(line_to_tool_note(file_display, current_line, \"# Markdown Header\\n\\nMarkdown Body\"))\n if(\"markdown code snippet\" in line):\n tool_notes.append(line_to_tool_note(file_display, current_line, \"```rust\\nlet best_programming_language = \\\"🦀\\\";\\n```\"))\n \n return tool_notes\n\ndef line_to_tool_note(filename, line_number, message):\n return {\n \"type\": \"Markdown Tool\",\n \"message\": message,\n \"file\": filename,\n \"line\": line_number\n }\n\ndef main():\n parser = argparse.ArgumentParser(description='Markdown Tool')\n parser.add_argument('path', metavar='PATH', help='Path to code')\n parser.add_argument('commit_hash', metavar='HASH', help='Commit hash')\n parser.add_argument('command', metavar='COMMAND', help='Command')\n\n args = parser.parse_args()\n\n path = args.path\n\n command = args.command\n\n if command == \"version\":\n emit_version()\n elif command == \"name\":\n emit_name()\n elif command == \"applicable\":\n emit_applicable()\n elif command == \"run\":\n run(path)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":".lift/markdown_tool.py","file_name":"markdown_tool.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"15981398","text":"# -*- coding: utf-8 -*-\n\nfrom django.contrib import admin\nfrom django.contrib.auth.models import Group, User\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\nfrom django.shortcuts import get_object_or_404\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.admin.sites import NotRegistered\n\nfrom cpi.apps.instances.models import Instance\nfrom cpi.apps.accounts.models import UserProfilePerInstance\nfrom cpi.apps.accounts.models import CPIUser\n\ntry:\n admin.site.unregister(User)\n admin.site.unregister(Group)\nexcept NotRegistered:\n pass\n\nfrom django.utils.safestring import mark_safe\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass UserProfileGameListFilter(admin.SimpleListFilter):\n title = 'games'\n parameter_name = 'game_slug'\n\n def lookups(self, request, model_admin):\n game_options = []\n for g in Instance.objects.all():\n if request.user.is_superuser or request.user in g.curators:\n game_options.append((g.slug, g.slug))\n return game_options\n\n def queryset(self, request, queryset):\n game_slug = self.value()\n if not game_slug:\n return queryset\n game = get_object_or_404(Instance, slug=game_slug.lower())\n\n if request.user.is_superuser or request.user in game.curators:\n user_ids = UserProfilePerInstance.objects.filter(instance=game).values_list('player_profile', flat=True)\n user_ids = CPIUser.objects.filter(pk__in=user_ids)\n return queryset.filter(pk__in=user_ids).order_by('-date_joined')\n\n return queryset\n\n\nclass AdminImageWidget(forms.FileInput):\n \"\"\"\n A ImageField Widget for admin that shows a thumbnail.\n \"\"\"\n\n def __init__(self, attrs={}):\n super(AdminImageWidget, self).__init__(attrs)\n\n def render(self, name, value, attrs=None):\n output = []\n if value and hasattr(value, \"url\"):\n output.append(('
'\n '
'\n % (value.url, value.url)))\n output.append(super(AdminImageWidget, self).render(name, value, attrs))\n return mark_safe(u''.join(output))\n\n\nclass UserProfilePerInstanceInline(admin.StackedInline):\n model = UserProfilePerInstance #UserProfile.instances.through\n filter_horizontal = ('stakes', 'affiliations',)\n readonly_fields = ('instance', 'affiliations', 'stakes', 'coins_awards', 'coins', 'coins_total_earned',)\n extra = 0\n max_num = 1\n\n # define the raw_id_fields\n raw_id_fields = ('stakes', 'affiliations',)\n # define the autocomplete_lookup_fields\n autocomplete_lookup_fields = {\n #'fk': [''],\n 'm2m': ['stakes', 'affiliations', ],\n }\n\n fieldsets = (\n (_('Game Info'), {\n 'classes': ('grp-collapse grp-open', 'wide',),\n 'fields': ('official_title', 'instance', 'affiliations', 'stakes', )\n }),\n (_('Permissions'), {\n 'classes': ('grp-collapse grp-open',),\n 'fields': ('is_curator', )\n }),\n (_('Coins Info'), {\n 'classes': ('grp-collapse grp-open', 'wide',),\n 'fields': ('coins_awards', 'coins', 'coins_total_earned', )\n }),\n )\n\n def queryset(self, request):\n qs = super(UserProfilePerInstanceInline, self).queryset(request)\n if request.user.is_superuser:\n return qs\n\n my_game_pks = UserProfilePerInstance.objects. \\\n filter(player_profile=request.user, is_curator=True). \\\n values_list('instance', flat=True)\n return qs.filter(instance__pk__in=my_game_pks)\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == \"instance\":\n if not request.user.is_superuser:\n my_games = UserProfilePerInstance.objects.\\\n filter(player_profile=request.user, is_curator=True).\\\n values_list('instance', flat=True)\n kwargs[\"queryset\"] = Instance.objects.filter(pk__in=my_games)\n return super(UserProfilePerInstanceInline, self).\\\n formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nclass CuratorFilter(admin.SimpleListFilter):\n title = 'curators'\n parameter_name = 'player_profile'\n\n def lookups(self, request, model_admin):\n return (\n (0, \"No\"),\n (1, \"Yes\"),\n )\n\n def queryset(self, request, queryset):\n if self.value() is not None:\n if request.user.is_superuser:\n pks = UserProfilePerInstance.objects.filter(is_curator=bool(int(self.value()))).values_list('player_profile', flat=True)\n return queryset.filter(pk__in=pks)\n\n my_game_pks = UserProfilePerInstance.objects. 
\\\n filter(player_profile=request.user, is_curator=True).values_list('instance', flat=True)\n pks = UserProfilePerInstance.objects.\\\n filter(instance__pk__in=my_game_pks, is_curator=bool(int(self.value()))).\\\n values_list('player_profile', flat=True)\n return queryset.filter(pk__in=pks)\n\n\nclass ProfileForm(forms.ModelForm):\n\n avatar_thumb = forms.ImageField(label=_('Avatar'), required=False, widget=AdminImageWidget)\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n try:\n #self.fields['first_name'].initial = self.instance.user.first_name\n #self.fields['last_name'].initial = self.instance.user.last_name\n self.fields['avatar_thumb'].initial = self.instance.avatar\n #self.fields['last_login'].initial = self.instance.user.last_login\n #self.fields['date_joined'].initial = self.instance.user.date_joined\n #self.fields['is_active'].initial = self.instance.user.is_active\n #self.fields['is_superuser'].initial = self.instance.user.is_superuser\n #self.fields['email'].initial = self.instance.user.email\n except CPIUser.DoesNotExist:\n pass\n\n self.fields['last_login'].widget.attrs['readonly'] = True\n self.fields['date_joined'].widget.attrs['readonly'] = True\n #self.fields['is_active'].required = False\n #self.fields['is_superuser'].required = False\n self.fields['zip_code'].required = False\n self.fields['birth_year'].required = False\n self.fields['gender'].required = False\n self.fields['race'].required = False\n self.fields['education'].required = False\n self.fields['income'].required = False\n self.fields['living'].required = False\n self.fields['how_discovered'].required = False\n\n def clean(self):\n email = self.cleaned_data.get('email')\n if self.fields['email'].initial != email:\n if CPIUser.objects.filter(email=email).exists():\n raise forms.ValidationError(\"A user already exists with email '%s'\" % email)\n\n return self.cleaned_data\n\n class Meta:\n #FIXME\n model = CPIUser\n exclude = ('mission_states', 'user',)\n fields = [\n 'first_name',\n 'last_name',\n 'avatar_thumb',\n 'last_login',\n 'date_joined',\n\n 'receive_email',\n 'city',\n 'zip_code',\n 'birth_year',\n 'gender',\n 'race',\n 'education',\n 'income',\n 'living',\n 'how_discovered',\n 'how_discovered_other',\n 'tagline',\n ]\n\n\nclass CPIUserAdmin(admin.ModelAdmin):\n\n #form = ProfileForm\n\n fieldsets = (\n (_('User Info'), {\n 'classes': ('grp-collapse grp-open', 'wide',),\n 'fields': ('first_name', 'last_name', 'email', 'language')\n }),\n (_('Demographic Data'), {\n 'classes': ('grp-collapse grp-closed',),\n 'fields': (\n 'city',\n 'zip_code',\n 'birth_year',\n 'gender',\n 'race',\n 'education',\n 'income',\n 'living',\n 'how_discovered',\n 'how_discovered_other',\n )\n }),\n (_('User Info Misc'), {\n 'classes': ('grp-collapse grp-closed',),\n 'fields': ('receive_email', 'avatar', 'tagline',)\n }),\n (_('Permissions'), {\n 'classes': ('grp-collapse grp-closed',),\n 'fields': ('is_active', )\n }),\n (_('Important dates'), {\n 'classes': ('grp-collapse grp-closed',),\n 'fields': ('last_login', 'date_joined')\n }),\n )\n search_fields = ('email', 'first_name', 'last_name')\n inlines = [UserProfilePerInstanceInline, ]\n #change_list_template = \"admin/change_list_filter_sidebar.html\"\n actions = ['export_accounts_for_instance_csv', 'send_email_digest']\n readonly_fields = (\n 'last_login',\n 'date_joined',\n 'city',\n 'zip_code',\n 'birth_year',\n 'gender',\n 'race',\n 'education',\n 'income',\n 'living',\n 'how_discovered',\n 
'how_discovered_other',\n )\n list_display = ('first_name', 'last_name', 'email', 'language', 'date_joined')\n list_filter = ('is_active', 'is_staff', 'has_completed_profile', 'language', 'date_joined',)\n\n class Meta:\n model =CPIUser\n\n def queryset(self, request):\n qs = super(CPIUserAdmin, self).queryset(request)\n if request.user.is_superuser:\n return qs\n my_game_pks = UserProfilePerInstance.objects. \\\n filter(player_profile=request.user, is_curator=True).values_list('instance', flat=True)\n pks = UserProfilePerInstance.objects.filter(instance__pk__in=my_game_pks).values_list('player_profile', flat=True)\n return qs.filter(pk__in=pks)\n\n #def get_fieldsets(self, request, obj=None):\n # fieldsets = super(CPIUserAdmin, self).get_fieldsets(request, obj)\n # if request.user.is_superuser:\n # fieldsets[3][1]['fields'] += ('is_superuser', )\n # return fieldsets\n\n def curator_count(self, user):\n return UserProfilePerInstance.objects. \\\n filter(player_profile=user, is_curator=True).count()\n\n def changelist_view(self, request, extra_context=None):\n if request.user.is_superuser or self.curator_count(request.user) > 1:\n self.list_filter += (UserProfileGameListFilter, CuratorFilter)\n self.list_display += ('games',)\n else:\n self.list_filter = self.list_filter\n return super(CPIUserAdmin, self).changelist_view(request, extra_context)\n\n def has_change_permission(self, request, obj=None):\n return request.user.is_staff\n\n def games(self, obj):\n slugs = UserProfilePerInstance.objects.filter(player_profile=obj).values_list('instance__slug', flat=True)\n if slugs:\n return u\", \".join(filter(lambda s: s is not None, slugs))\n return \"\"\n #TODO\n # do not show all games for curators. need access to\n games.short_description = _(\"Game(s)\")\n\n def send_email_digest(self, request, queryset):\n for cpi_user in queryset:\n for up in cpi_user.my_game_profiles.\\\n exclude(instance__is_disabled=True).\\\n filter(player_profile__receive_email=True,\n player_profile__is_active=True,):\n up.send_email_digest()\n send_email_digest.short_description = \"Send email digest to selected players\"\n\n def export_accounts_for_instance_csv(self, request, queryset):\n \"\"\"\n Generic csv export admin action.\n \"\"\"\n if not request.user.is_staff:\n raise PermissionDenied\n from cpi.libs.uwsgiutils.tasks import accounts2csv\n email_to = \"admin@communityplanit.org\" #str(request.user.email)\n accounts2csv.spool(dict(email_to=email_to, game_slug=str(request.GET.get('game_slug', \"\"))))\n self.message_user(request, u'csv file with selected user profiles was sent to %s' % request.user.email)\n\n export_accounts_for_instance_csv.short_description = \"Export selected profile emails as csv file\"\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n log.debug(instances)\n for instance in instances:\n log.debug(instance)\n if isinstance(instance, UserProfilePerInstance):\n #user = instance.player_profile\n if instance.is_curator:\n instance.assign_curator_permissions()\n instance.player_profile.is_staff = instance.is_curator\n instance.player_profile.save()\n log.debug(u\"marking %s as curator\" % instance.player_profile)\n instance.save()\n\nadmin.site.register(CPIUser, CPIUserAdmin)\n\n#from django.contrib.auth.admin import UserAdmin\n#class CPIUserAdmin(admin.ModelAdmin):\n# list_display = ('email', 'first_name', 'last_name')\n# ordering = ('-date_joined', )\n# search_fields = ('user__email', 'user__first_name', 
'user__last_name')\n\n#admin.site.register(CPIUser, CPIUserAdmin)\n","sub_path":"src/cpi/apps/accounts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":13310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"278088638","text":"# -*- coding: utf-8 -*-\n#\n# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.\n#\n# Util classes for Splunk\n#\n\nimport splunklib.client as splunk_client\nimport splunklib.results as splunk_results\nimport time\nimport requests\nfrom xml.dom import minidom\nimport json\nimport sys\n\nif sys.version_info.major < 3:\n import urllib as urlparse\nelse:\n import urllib.parse as urlparse\n\nimport logging\nLOG = logging.getLogger(__name__)\n\n# Constants\nSPLUNK_SECTION=\"splunk_integration\"\n\n\nclass SearchFailure(Exception):\n \"\"\" Search failed to execute \"\"\"\n def __init__(self, search_id, search_status):\n fail_msg = \"Query [{}] failed with status [{}]\".format(search_id, search_status)\n super(SearchFailure, self).__init__(fail_msg)\n self.search_status = search_status\n\n\nclass SearchTimeout(Exception):\n \"\"\" Query failed to complete in time specified \"\"\"\n def __init__(self, search_id, search_status):\n fail_msg = \"Query [{}] timed out. Final Status was [{}]\".format(search_id, search_status)\n super(SearchTimeout, self).__init__(fail_msg)\n self.search_status = search_status\n\n\nclass SearchJobFailure(Exception):\n \"\"\" Search job creation failure\"\"\"\n def __init__(self, query):\n fail_msg = u\"Failed to create search job for query [{}] \".format(query)\n super(SearchJobFailure, self).__init__(fail_msg)\n\n\nclass RequestError(Exception):\n \"\"\" Request error\"\"\"\n def __init__(self, url, message):\n fail_msg = u\"Request to url [{}] throws exception. Error [{}]\".format(url, message)\n super(RequestError, self).__init__(fail_msg)\n\n\nclass DeleteError(Exception):\n \"\"\" Request error\"\"\"\n def __init__(self, url, message):\n fail_msg = u\"Delete request to url [{}] throws exception. 
Error [{}]\".format(url, message)\n super(DeleteError, self).__init__(fail_msg)\n\n\nclass SplunkClient(object):\n \"\"\" Wrapper of splunklib.client\"\"\"\n\n # member variables\n splunk_service = None\n time_out = 600\n polling_interval = 5\n max_return = 0\n\n def __init__(self, host, port, username, password, verify=True):\n \"\"\"Init splunk_service\"\"\"\n self.splunk_service = self.connect(host, port, username, password, verify)\n\n @staticmethod\n def connect(host, port, username, password, verify):\n \"\"\"\n Connect to Splunk\n :param host: hostname for splunk\n :param port: port for splunk\n :param username: user name to login\n :param password: password to login\n :param verify: True to validate the SSL cert\n :return:\n \"\"\"\n LOG.info(\"Splunk SDK verify flag is {}\".format(verify))\n return splunk_client.connect(host=host,\n port=port,\n username=username,\n password=password,\n verify=verify)\n\n def set_timeout(self, timeout):\n self.time_out = timeout\n\n def set_polling_interval(self, pollinginterval):\n self.polling_interval = pollinginterval\n\n def set_max_return(self, max):\n self.max_return = max\n\n def start_search(self, query, job_ttl=None):\n \"\"\"Start a search for a query\"\"\"\n\n query_args = {\"search_mode\": \"normal\",\n \"enable_lookups\": True}\n if self.max_return:\n query_args[\"max_count\"] = self.max_return\n\n job = None\n try:\n job = self.splunk_service.jobs.create(query, **query_args)\n if job_ttl:\n job.set_ttl(job_ttl)\n except Exception as e:\n LOG.exception(\"Search job creation failed\")\n #\n # If we failed to create a search job, it does not make sense to go further\n #\n raise SearchJobFailure(query)\n\n return job\n\n def execute_query(self, query):\n \"\"\"\n Execute splunk query\n :param query: query string\n :return:\n \"\"\"\n result = dict()\n\n LOG.debug(u\"Query: {}\" .format(query))\n\n splunk_job = self.start_search(query)\n\n # Poll Splunk for result\n start_time = time.time()\n done = False\n\n while not done:\n if not splunk_job.is_ready():\n pass\n else:\n splunk_job.refresh()\n done = splunk_job[\"dispatchState\"] in (\"FAILED\", \"DONE\")\n\n stats = {\"name\": splunk_job.name,\n \"isDone\": splunk_job.isDone,\n \"scanCount\": int(splunk_job[\"scanCount\"]),\n \"eventCount\": int(splunk_job[\"eventCount\"]),\n \"doneProgress\": float(splunk_job[\"doneProgress\"]) * 100,\n \"resultCount\": int(splunk_job[\"resultCount\"])}\n\n status = (\"\\r%(doneProgress)03.1f%% %(scanCount)d scanned \"\n \"%(eventCount)d matched %(resultCount)d results\") % stats\n\n LOG.debug(status)\n\n if not done:\n if self.time_out!= 0:\n if time.time() - start_time > self.time_out:\n #\n # old sdk\n #splunk_client.cancel_search(splunk_job)\n #\n splunk_job.cancel()\n raise SearchTimeout(splunk_job.name, splunk_job[\"dispatchState\"])\n LOG.debug(\"Sleeping for %s\", self.polling_interval)\n time.sleep(self.polling_interval)\n\n if splunk_job[\"dispatchState\"] != \"DONE\" or splunk_job[\"isFailed\"] == True:\n if sys.version_info.major < 3:\n raise SearchFailure(splunk_job.name, splunk_job[\"dispatchState\"] + u\", \" + unicode(splunk_job[\"messages\"]))\n else:\n # strings in python3 are unicode\n raise SearchFailure(splunk_job.name, splunk_job[\"dispatchState\"] + u\", \" + str(splunk_job[\"messages\"]))\n\n reader = splunk_results.ResultsReader(splunk_job.results())\n result = {\"events\": list([dict(row) for row in reader])}\n\n return result\n\n\nclass SplunkUtils(object):\n \"\"\" Use python requests to call Splunk REST API\"\"\"\n\n # 
Member variables\n session_key = \"\"\n base_url = \"\"\n SUPPORTED_THREAT_TYPE = [\"ip_intel\", \"file_intel\", \"user_intel\", \"http_intel\",\n \"email_intel\", \"service_intel\", \"process_intel\",\n \"registry_intel\", \"certificate_intel\"]\n\n def __init__(self, host, port, username, password, verify):\n self.base_url = \"https://{}:{}\".format(host, port)\n self.get_session_key(username, password, verify)\n\n def get_session_key(self, username, password, verify):\n \"\"\"\n Get session_key from Splunk server\n :param username: user name for splunk login\n :param password: password for splunk login\n :param verify: verify HTTPS cert or not\n :return:\n \"\"\"\n\n headers = dict()\n headers[\"Accept\"] = \"application/html\"\n url = self.base_url + \"/services/auth/login\"\n try:\n resp = requests.post(url,\n headers=headers,\n data=urlparse.urlencode({\"username\": username,\n \"password\": password}),\n verify=verify)\n #\n # Only a 200 response is accepted here. Otherwise login failed\n #\n if resp.status_code == 200:\n # docs.splunk.com/Documentation/Splunk/7.0.2/RESTTUT/RESTsearches\n self.session_key = minidom.parseString(resp.content).getElementsByTagName(\"sessionKey\")[0].childNodes[\n 0].nodeValue\n else:\n error_msg = \"Splunk login failed for user {} with status {}\".format(username, resp.status_code)\n raise RequestError(url, error_msg)\n except Exception:\n # Re-raise with the original traceback intact\n raise\n\n return\n\n def update_notable(self, event_id, comment, status, cafile):\n \"\"\"\n Update notable event\n :param event_id: event_id for notable event to be updated\n :param comment: comment to add to the notable event\n :param status: status of the notable event to change to\n :param cafile: Verify HTTPS cert or not\n :return:\n \"\"\"\n\n headers = dict()\n headers[\"Authorization\"] = \"Splunk {}\".format(self.session_key)\n\n args = dict()\n args[\"comment\"] = comment\n args[\"status\"] = status\n args[\"ruleUIDs\"] = [event_id]\n\n ret = None\n url = self.base_url + \"/services/notable_update\"\n\n try:\n resp = requests.post(url,\n headers=headers,\n data=args,\n verify=cafile)\n\n #\n # Just return the response in json and let the post-processing\n # make the decision.\n #\n ret = {\"status_code\": resp.status_code,\n \"content\": resp.json()}\n\n except requests.ConnectionError as e:\n raise RequestError(url, \"Connection error. \" + str(e))\n except requests.HTTPError as e:\n raise RequestError(url, \"An HTTP error. \" + str(e))\n except requests.URLRequired as e:\n raise RequestError(url, \"A valid URL is required.\")\n except requests.TooManyRedirects as e:\n raise RequestError(url, \"Too many redirects\")\n except requests.RequestException as e:\n raise RequestError(url, \"Ambiguous exception when handling request. 
\" + str(e))\n return ret\n\n def delete_threat_intel_item(self, threat_type, item_key, cafile):\n \"\"\"\n Delete an item from the threat_intel collections.\n :param threat_type: ip_intel, file_intel, user_intel, http_intel, email_intel, service_intel\n process_intel, registry_intel, or certificate_intel\n :param item_key: the _key for ite to delete\n :param cafile: CA cert or False to skip cert verification\n :return:\n \"\"\"\n\n headers = dict()\n headers[\"Authorization\"] = \"Splunk {}\".format(self.session_key)\n url = \"{0}/services/data/threat_intel/item/{1}/{2}\".format(self.base_url, threat_type, item_key)\n\n if threat_type not in self.SUPPORTED_THREAT_TYPE:\n raise RequestError(url, \"{} is not supported\")\n\n ret = {}\n try:\n resp = requests.delete(url,\n headers=headers,\n verify=cafile)\n #\n # We shall just return the response in json and let the post process\n # to make decision.\n #\n ret = {\"status_code\": resp.status_code,\n \"content\": resp.json()}\n\n except Exception as e:\n raise DeleteError(url, u\"Failed to delete: {}\".format(str(e)))\n\n return ret\n\n def add_threat_intel_item(self, threat_type, threat_dict, cafile):\n \"\"\"\n Add a new threat intel item to the ThreatIntelligence collections\n :param threat_type: ip_intel, file_intel, user_intel, http_intel, email_intel, service_intel\n process_intel, registry_intel, or certificate_intel\n :param threat_dict:\n :param cafile:\n :return:\n \"\"\"\n headers = dict()\n headers[\"Authorization\"] = \"Splunk {}\".format(self.session_key)\n\n url = self.base_url + \"/services/data/threat_intel/item/\" + threat_type\n\n if threat_type not in self.SUPPORTED_THREAT_TYPE:\n raise RequestError(url, \"{} is not supported\")\n\n item = {\"item\": json.dumps(threat_dict)}\n\n try:\n resp = requests.post(url,\n headers=headers,\n data=item,\n verify=cafile)\n\n #\n # We shall just return the response in json and let the post process\n # to make decision.\n #\n ret = {\"status_code\": resp.status_code,\n \"content\": resp.json()}\n\n except requests.ConnectionError as e:\n raise RequestError(url, \"Connection error. \" + str(e))\n except requests.HTTPError as e:\n raise RequestError(url, \"An HTTP error. \" + str(e))\n except requests.URLRequired as e:\n raise RequestError(url, \"An valid URL is required.\")\n except requests.TooManyRedirects as e:\n raise RequestError(url, \"Too many redirects\")\n except requests.RequestException as e:\n raise RequestError(url, \"Ambiguous exception when handling request. \" + str(e))\n return ret\n","sub_path":"fn_splunk_integration/fn_splunk_integration/util/splunk_utils.py","file_name":"splunk_utils.py","file_ext":"py","file_size_in_byte":12856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"254360861","text":"\"\"\"\nAuthors: Tim Bedin, Ricardo Pascual, Tim Erwin\n\nCopyright 2014 CSIRO\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nThis module contains the ProcessUnit class.\n\n\"\"\"\n\nimport os\nimport logging\nimport string\n\nfrom cwsl.configuration import configuration\nfrom cwsl.utils import utils\nfrom cwsl.core.argument_creator import ArgumentCreator\nfrom cwsl.core.file_creator import FileCreator\nfrom cwsl.core.constraint import Constraint\nfrom cwsl.core.scheduler import SimpleExecManager\n\n\nmodule_logger = logging.getLogger('cwsl.core.process_unit')\n\n\nclass ProcessUnit(object):\n \"\"\" This class sets up the execution of an operation performed on input DataSets.\n\n This class takes in a list of input DataSets, an output\n pattern to write files to, the shell command that needs to be\n run and any output_constraints that need to be added to the\n output.\n\n It sets up the ArgumentCreator classes required to\n run a particular, single processing operation for all of\n the possible combinations in the input and output DataSets.\n\n This could be done using a VisTrails module directly, but doing it like\n this allows us to separate the GUI presentation from the execution.\n\n \"\"\"\n\n def __init__(self, inputlist, output_pattern, shell_command,\n extra_constraints=None, map_dict=None, cons_keywords=None,\n positional_args=None, execution_options=None, kw_string=None,\n merge_output=None):\n\n \"\"\"\n Arguments:\n\n inputlist: A list of input DataSets to get files from.\n\n output_pattern: A filename pattern to use for data output.\n\n shell_command: The base shell command for the process to run.\n\n Optional:\n\n extra_constraints: Extra constraints to be applied to the output.\n\n map_dict: a dictionary linking constraint names in the input DataSets\n to new constraints in the output. e.g.\n if map_dict = {\"obs-model\": (\"model\", 0)} then the \"model\" constraint in the\n input position 0 is renamed to be the \"obs-model\" Constraint in the output.\n\n cons_keywords: Used in building the command to be run, if a constraint has to\n be used as a keyword argument.\n\n positional_args: Used in building the command to be run, if a constraint\n has to be used as a positional argument.\n\n execution_options: A dictionary to pass options like required queues, walltime,\n required modules etc. to the process unit. 
Currently only\n                required_modules is implemented.\n\n            kw_string: A string used for composite constraint keyword arguments, i.e.\n                using multiple attribute values in a single keyword argument.\n                example - kw_string=\"--title $model_$variable\"\n\n        \"\"\"\n\n        if map_dict:\n            self.map_dict = map_dict\n        else:\n            self.map_dict = {}\n\n        self.merge_output = merge_output\n\n        self.mapped_con_names = [cons_name for cons_name in self.map_dict]\n\n        self.inputlist = inputlist\n        self.shell_command = shell_command\n\n        # To avoid mutable defaults problems, set\n        # Nones to empty dicts.\n        if execution_options:\n            self.execution_options = execution_options\n        else:\n            self.execution_options = {}\n        if cons_keywords:\n            self.cons_keywords = cons_keywords\n        else:\n            self.cons_keywords = {}\n        if positional_args:\n            self.positional_args = positional_args\n        else:\n            self.positional_args = {}\n\n        if kw_string:\n            self.kw_string = kw_string\n        else:\n            self.kw_string = None\n\n        # The initial Constraints are built from the output file pattern.\n        pattern_constraints = set(FileCreator.constraints_from_pattern(output_pattern))\n\n        mapped_constraints = self.apply_mappings(pattern_constraints)\n\n        # Apply extra constraints given in the constructor.\n        filled_constraints = self.fill_constraints_from_extras(mapped_constraints,\n                                                               extra_constraints)\n\n        # Finally fill the empty output constraints from the input DataSets.\n        self.final_constraints = self.fill_from_input(self.inputlist, filled_constraints)\n        module_logger.debug(\"Final output constraints are: {0}\".format(self.final_constraints))\n\n        for ds in inputlist:\n            module_logger.debug(\"Input constraints are: {}\"\n                                .format(ds.constraints))\n\n        # Make a file_creator from the new, fixed constraints.\n        self.file_creator = FileCreator(output_pattern, self.final_constraints)\n\n    def apply_mappings(self, constraints):\n\n        module_logger.debug(\"Before applying mappings, output_constraints are: {}\"\n                            .format(constraints))\n\n        for map_name, map_spec in self.map_dict.items():\n            # First update the outputs with values from the input.\n            found_con = self.inputlist[map_spec[1]].get_constraint(map_spec[0])\n            constraints.add(Constraint(map_name, found_con.values))\n            # Remove the empty constraint.\n            constraints.remove(Constraint(map_name, []))\n\n            # Update the subsets dictionary for the input.\n            # This will fail for a FileCreator.\n            try:\n                for value in found_con.values:\n                    module_logger.debug(\"Updating subsets for {}: {}\"\n                                        .format(map_name, value))\n                    found_files = self.inputlist[map_spec[1]].get_files({found_con.key: value})\n                    module_logger.debug(\"Found files are: {}\".format(found_files))\n                    self.inputlist[map_spec[1]].subsets[map_name][value] = [file_ob.full_path\n                                                                            for file_ob in found_files]\n            except AttributeError:\n                pass\n\n            # Add the mapped constraint to the input's cons_names.\n            self.inputlist[map_spec[1]].cons_names.append(map_name)\n            # Remove the now obsolete constraint.\n            self.inputlist[map_spec[1]].cons_names.remove(map_spec[0])\n\n            # Now alter the valid combinations of the input.\n            fixed_combinations = set([])\n            for combination in self.inputlist[map_spec[1]].valid_combinations:\n                module_logger.debug(\"Original combination is: {}\".format(combination))\n                new_list = []\n                for constraint in combination:\n                    if constraint.key == map_spec[0]:\n                        new_list.append(Constraint(map_name, constraint.values))\n                    new_list.append(constraint)\n                module_logger.debug(\"New combination is: {}\".format(new_list))\n                fixed_combinations.add(frozenset(new_list))\n            self.inputlist[map_spec[1]].valid_combinations = fixed_combinations\n\n        module_logger.debug(\"After applying mappings, output_constraints are: {}\"\n                            .format(constraints))\n\n        return constraints\n
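\n    # Illustrative note (added): a concrete picture of what apply_mappings\n    # does, using the map_dict example from the class docstring. With\n    # map_dict = {\"obs-model\": (\"model\", 0)}, the empty output constraint\n    # named \"obs-model\" is filled from the \"model\" constraint of the input\n    # DataSet in position 0, e.g.\n    #\n    #     inputs[0].get_constraint(\"model\")  ->  Constraint(\"model\", [\"ACCESS1-0\"])\n    #     # the output set then gains:           Constraint(\"obs-model\", [\"ACCESS1-0\"])\n    #\n    # The input's cons_names and valid_combinations are rewritten to use the\n    # new name so that later input/output matching lines up.\n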
\n\n    def fill_from_input(self, inputlist, constraints):\n\n        module_logger.debug(\"Before filling from input, output_constraints are: {}\"\n                            .format(constraints))\n\n        new_cons = set([])\n        to_remove = []\n        for cons in constraints:\n            if not cons.values:\n                module_logger.debug(\"Trying to fill constraint: {}\"\n                                    .format(cons))\n                found_cons = set([input_ds.get_constraint(cons.key)\n                                  for input_ds in inputlist\n                                  if input_ds.get_constraint(cons.key)])\n\n                module_logger.debug(\"Found constraints: {}\"\n                                    .format(found_cons))\n                new_cons = new_cons.union(found_cons)\n                to_remove.append(cons)\n\n        for cons in to_remove:\n            constraints.remove(cons)\n\n        constraints = constraints.union(new_cons)\n\n        module_logger.debug(\"After filling from input, output_constraints are: {}\"\n                            .format(constraints))\n\n        return constraints\n\n    def fill_constraints_from_extras(self, constraints,\n                                     extra_constraints):\n        \"\"\" Add extra constraints to a set of constraints.\"\"\"\n\n        if extra_constraints is None:\n            extra_constraints = []\n\n        module_logger.debug(\"Before filling from extras, output constraints: {}\"\n                            .format(constraints))\n        module_logger.debug(\"Extra constraints to fill are: {}\"\n                            .format(extra_constraints))\n\n        # Make sure we are not overwriting with empty constraints.\n        for cons in extra_constraints:\n            if not cons.values:\n                raise EmptyOverwriteError(\"Constraint {} is being used to overwrite\"\n                                          .format(cons))\n\n        # Find the empty constraints to fill.\n        empty_cons_names = [cons.key for cons in constraints\n                            if not cons.values]\n        module_logger.debug(\"Attempting to fill: {}\"\n                            .format(empty_cons_names))\n\n        # Lists to hold constraints to add or remove.\n        to_add = []\n        to_remove = []\n\n        for cons in extra_constraints:\n            # Add the extra_constraints if they are found in the output.\n            if cons.key in empty_cons_names:\n                to_add.append(cons)\n                # Remove the empty.\n                to_remove += [bad_cons for bad_cons in constraints\n                              if bad_cons.key == cons.key]\n\n        for cons in to_remove:\n            constraints.remove(cons)\n        for cons in to_add:\n            constraints.add(cons)\n\n        module_logger.debug(\"After filling from extras, output constraints: {}\"\n                            .format(constraints))\n\n        return constraints\n\n    def execute(self, simulate=False):\n        \"\"\" This method runs the actual process.\n\n        This method returns a FileCreator to be used\n        as input to the next VisTrails module.\n\n        \"\"\"\n\n        # Check that cwsl_ctools_path is set\n        if not configuration.cwsl_ctools_path:\n            raise Exception(\"cwsl_ctools_path is not set in package options\")\n\n        configuration.cwsl_ctools_path = os.path.expandvars(configuration.cwsl_ctools_path)\n        if not os.path.exists(configuration.cwsl_ctools_path):\n            raise Exception(\"Path: {} for cwsl_ctools_path does not exist\"\n                            .format(configuration.cwsl_ctools_path))\n\n        # We now create a looper to compare all the input Datasets with\n        # the output FileCreator.\n        this_looper = ArgumentCreator(self.inputlist, self.file_creator, self.merge_output)\n\n        # TODO determine scheduler from user options.\n        scheduler = SimpleExecManager(noexec=simulate)\n\n        if 'required_modules' in self.execution_options:\n            scheduler.add_module_deps(self.execution_options['required_modules'])\n\n        # Add environment variables to the script and the current environment.\n        scheduler.add_environment_variables({'CWSL_CTOOLS': configuration.cwsl_ctools_path})\n        os.environ['CWSL_CTOOLS'] = configuration.cwsl_ctools_path\n        scheduler.add_python_paths([os.path.join(configuration.cwsl_ctools_path, 'pythonlib')])\n
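\n        # Illustrative note (added): for a combination whose constraint dict\n        # is, say, {\"model\": \"ACCESS1-0\", \"variable\": \"tas\"}, with a\n        # hypothetical shell_command of 'echo',\n        # cons_keywords = {\"title\": \"model\"} and\n        # kw_string = \"--label ${model}_$variable\",\n        # the command list assembled below would look roughly like:\n        #     ['echo', 'in_1.nc', 'out_1.nc', '--title ACCESS1-0', '--label ACCESS1-0_tas']\n        # (positional args, if any, are spliced in by apply_positional_args).\n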
\n        # For every valid possible combination, apply any positional and\n        # keyword args, then add the command to the scheduler.\n        for combination in this_looper:\n            if combination:\n\n                in_files, out_files = self.get_fullnames((combination[0], combination[1]))\n                this_dict = combination[2]\n\n                base_cmd_list = [self.shell_command] + in_files + out_files\n\n                # Now apply any keyword arguments and positional args.\n                keyword_command_list = self.apply_keyword_args(base_cmd_list, this_dict)\n                positional_list = self.apply_positional_args(keyword_command_list, this_dict)\n                final_command_list = self.apply_kwstring(positional_list, this_dict)\n\n                # Generate the annotation string.\n                try:\n                    annotation = utils.build_metadata(final_command_list)\n                except NameError:\n                    annotation = None\n\n                # The subprocess / queue submission is done here.\n                scheduler.add_cmd(final_command_list, out_files, annotation=annotation)\n\n        scheduler.submit()\n\n        # The scheduler is kept for testing purposes.\n        self.scheduler = scheduler\n\n        return self.file_creator\n\n    def apply_keyword_args(self, command_list, kw_cons_dict, prefix='--'):\n        \"\"\" Add keywords from the keyword constraint dictionary to the command list.\"\"\"\n\n        for keyword in self.cons_keywords:\n            associated_cons_name = self.cons_keywords[keyword]\n            this_att_value = kw_cons_dict[associated_cons_name]\n\n            command_list.append(prefix + keyword + ' ' + this_att_value)\n\n        return command_list\n\n    def apply_kwstring(self, command_list, cons_dict):\n        \"\"\" Use a string template to build a composite keyword argument. \"\"\"\n\n        if not self.kw_string:\n            return command_list\n\n        outstring = string.Template(self.kw_string).substitute(cons_dict)\n\n        command_list.append(outstring)\n\n        return command_list\n\n    def apply_positional_args(self, arg_list, constraint_dict):\n        \"\"\" Add positional args to a list of command arguments. \"\"\"\n\n        for arg_tuple in self.positional_args:\n            arg_name = arg_tuple[0]\n            position = arg_tuple[1]\n\n            try:\n                if arg_tuple[2] == 'raw':\n                    this_att_value = arg_name\n            except IndexError:\n                this_att_value = constraint_dict[arg_name]\n\n            if position != -1:\n                position += 1  # +1 because arg_list[0] is the actual command!\n                arg_list.insert(position, this_att_value)\n            else:\n                arg_list.append(this_att_value)\n\n        return arg_list\n\n    def get_fullnames(self, combination):\n        \"\"\" Generate the full file paths, given a tuple of metafile lists.\"\"\"\n\n        in_files = []\n        out_files = []\n        in_files += [infile.full_path for infile in combination[0]]\n\n        out_files += [outfile.full_path for outfile in combination[1]]\n\n        return in_files, out_files\n\n\n# Exception Classes\nclass EmptyOverwriteError(Exception):\n    pass\n","sub_path":"cwsl/core/process_unit.py","file_name":"process_unit.py","file_ext":"py","file_size_in_byte":15297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"297638151","text":"import os\nimport tarfile\nfrom six.moves import urllib\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nDOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml/master/\"\nHOUSING_PATH = os.path.join(\"datasets\", \"housing\")\nHOUSING_URL = DOWNLOAD_ROOT + \"datasets/housing/housing.tgz\"\n\n\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()\n\n\ndef load_housing_data(housing_path=HOUSING_PATH):\n csv_path = os.path.join(housing_path, \"housing.csv\")\n return pd.read_csv(csv_path)\n\n\nfetch_housing_data()\nhousing = load_housing_data()\n\nhousing\nhousing.info()\nhousing.describe().transpose().round(2)\n\nhousing[\"ocean_proximity\"].value_counts()\nhousing.groupby('ocean_proximity').size()\n\n\nhousing.hist(bins=50, figsize=(20,15))\nplt.show()\n\n\ndef split_train_test(data, test_ratio):\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]\n\n\nhousing_train, housing_test = split_train_test(housing, 0.2)\n\"train: {}, test: {}, all: {}\".format(len(housing_train), len(housing_test),\n len(housing))\n\n# how np.random.permutation works\nnp.random.permutation(10)\nnp.random.seed(42) # trzeba ustawić przed każdą operacją\nnp.random.permutation(10)\n# ----\n\nfrom sklearn.model_selection import train_test_split\n\ntrain_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n\"train: {}, test: {}, all: {}\".format(len(train_set), len(test_set),\n len(housing))\n\nhousing['median_income'].hist(bins=20, figsize=(4,3))\nplt.show()\n\nhousing[\"income_cat\"] = np.ceil(housing[\"median_income\"] / 1.5)\nhousing[\"income_cat\"].where(housing[\"income_cat\"] < 5, 5.0, inplace=True)\n\n\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nfor train_index, test_index in split.split(housing, housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]\n\n\"train: {}, test: {}, all: {}\".format(len(strat_train_set),\n len(strat_test_set), len(housing))\n\nhousing['income_cat'].value_counts() / len(housing)\nstrat_train_set['income_cat'].value_counts() / len(strat_train_set)\nstrat_test_set['income_cat'].value_counts() / len(strat_test_set)\n\nfor set in (strat_train_set, strat_test_set):\n set.drop([\"income_cat\"], axis=1, inplace=True)\n\n\nhousing = strat_train_set.copy()\nhousing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.1)\nplt.show()\n\nhousing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", alpha=0.4,\n s=housing[\"population\"]/100, label=\"population\",\n c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"), colorbar=True,\n)\nplt.legend()\nplt.show()\n\n\ncorr_matrix = housing.corr().round(2)\ncorr_matrix['median_house_value'].sort_values(ascending=False)\n\nfrom pandas.tools.plotting import scatter_matrix\nattributes = [\"median_house_value\", \"median_income\", 
\"total_rooms\",\n\"housing_median_age\"]\nscatter_matrix(housing[attributes], figsize=(12, 8))\nplt.show()\nhousing.info()\nhousing.total_bedrooms.hist()\nplt.show()\n\nhousing[\"rooms_per_household\"] = housing[\"total_rooms\"]/housing[\"households\"]\nhousing[\"bedrooms_per_room\"] = housing[\"total_bedrooms\"]/housing[\"total_rooms\"]\nhousing[\"population_per_household\"]=housing[\"population\"]/housing[\"households\"]\n\ncorr_matrix = housing.corr()\ncorr_matrix['median_house_value'].sort_values(ascending=False)\n\n\nhousing = strat_train_set.copy()\nhousing = strat_train_set.drop(\"median_house_value\", axis=1)\nhousing_labels = strat_train_set[\"median_house_value\"].copy()\n\nhousing['total_bedrooms'].isna().value_counts()\n\nfrom sklearn.preprocessing import Imputer\n\nimputer = Imputer(strategy='median')\nhousing_num = housing.drop(\"ocean_proximity\", axis=1)\nimputer.fit(housing_num)\nimputer.statistics_\nall(imputer.statistics_ == housing_num.median().values)\nX = imputer.transform(housing_num)\nhousing_tr = pd.DataFrame(X, columns=housing_num.columns)\n\nfrom sklearn.preprocessing import LabelEncoder\nencoder = LabelEncoder()\nhousing_cat = housing['ocean_proximity']\nhousing_cat_encoded = encoder.fit_transform(housing_cat)\nencoder.classes_\n\nfrom sklearn.preprocessing import OneHotEncoder\nencoder = OneHotEncoder()\nhousing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1, 1))\nhousing_cat_1hot.toarray()\n\nfrom sklearn.preprocessing import LabelBinarizer\nencoder = LabelBinarizer()\nhousing_cat_1hot = encoder.fit_transform(housing_cat)\nhousing_cat_1hot\n\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nstd_scaler = StandardScaler()\nminmax_scaler = MinMaxScaler()\nstd_scaler.fit_transform(housing[['housing_median_age']])\nminmax_scaler.fit_transform(housing[['median_house_value']])\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n def fit(self, X, y=None):\n return self\n def transform(self, X):\n return X[self.attribute_names].values\n\nnum_attribs = list(housing_num)\ncat_attribs = [\"ocean_proximity\"]\n\nnum_pipeline = Pipeline([\n ('selector', DataFrameSelector(num_attribs)),\n ('imputer', Imputer(strategy='median')),\n ('std_scaler', StandardScaler()),\n])\n\ncat_pipeline = Pipeline([\n ('selector', DataFrameSelector(cat_attribs)),\n ('label_binarizer', LabelBinarizer()),\n])\n\nfull_pipeline = FeatureUnion(transformer_list=[\n (\"num_pipeline\", num_pipeline),\n (\"cat_pipeline\", cat_pipeline),\n])\n\n# does not work for scikit-learn==0.19.0\n# works for pip install scikit-learn==0.18.0\nhousing_prepared = full_pipeline.fit_transform(housing)\n\n\nfrom sklearn.linear_model import LinearRegression\n\nlin_reg = LinearRegression()\nlin_reg.fit(housing_prepared, housing_labels)\n\nsome_data = housing.iloc[:5]\nsome_labels = housing_labels.iloc[:5]\n\nsome_data_prepared = full_pipeline.transform(some_data)\nlin_reg.predict(some_data_prepared)\nlist(some_labels)\n\n\nfrom sklearn.metrics import mean_squared_error\nhousing_predictions = lin_reg.predict(housing_prepared)\nlin_mse = mean_squared_error(housing_labels, housing_predictions)\nlin_rmse = np.sqrt(lin_mse)\nlin_rmse\n\nfrom sklearn.tree import DecisionTreeRegressor\ntree_reg = 
DecisionTreeRegressor()\ntree_reg.fit(housing_prepared, housing_labels)\nhousing_predictions = tree_reg.predict(housing_prepared)\ntree_mse = mean_squared_error(housing_labels, housing_predictions)\ntree_rmse = np.sqrt(tree_mse)\ntree_rmse\n\nfrom sklearn.model_selection import cross_val_score\nscores = cross_val_score(tree_reg, housing_prepared, housing_labels,\n scoring='neg_mean_squared_error', cv=10)\ntree_rmse_scores = np.sqrt(-scores)\n\nfrom sklearn.ensemble import RandomForestRegressor\nforest_reg = RandomForestRegressor()\nforest_reg.fit(housing_prepared, housing_labels)\nforest_predictions = forest_reg.predict(housing_prepared)\nforest_mse = mean_squared_error(housing_labels, forest_predictions)\nforest_rmse = np.sqrt(forest_mse)\nforest_rmse\n\nfrom sklearn.externals import joblib\njoblib.dump(forest_reg, \"forest.pkl\")\nforest_mm = joblib.load(\"forest.pkl\")\n\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid = [\n {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},\n {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}\n]\nforest_reg = RandomForestRegressor()\ngrid_search = GridSearchCV(forest_reg, param_grid, cv=5,\n scoring='neg_mean_squared_error')\ngrid_search.fit(housing_prepared, housing_labels)\ngrid_search.best_params_\ngrid_search.best_estimator_\n","sub_path":"scratchpad/machine_learning/chapter2.py","file_name":"chapter2.py","file_ext":"py","file_size_in_byte":8234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"557187355","text":"from backend.corpora.common.corpora_orm import (\n DatasetArtifactFileType,\n DatasetArtifactType,\n DbDatasetProcessingStatus,\n UploadStatus,\n)\nfrom backend.corpora.common.entities import Dataset\nfrom backend.corpora.common.utils.db_session import processing_status_updater\nfrom backend.corpora.lambdas.upload_failures.upload import update_dataset_processing_status_to_failed\nfrom tests.unit.backend.corpora.common.entities.datasets import TestDataset\nfrom tests.unit.backend.utils import BogusProcessingStatusParams, BogusDatasetParams\n\n\nclass TestUpdateDataset(TestDataset):\n def test__update__ok(self):\n artifact_params = dict(\n filename=\"filename_1\",\n filetype=DatasetArtifactFileType.H5AD,\n type=DatasetArtifactType.ORIGINAL,\n user_submitted=True,\n s3_uri=\"some_uri\",\n )\n processing_status = BogusProcessingStatusParams.get()\n dataset_params = BogusDatasetParams.get()\n\n dataset = Dataset.create(\n self.session,\n **dataset_params,\n artifacts=[artifact_params],\n processing_status=processing_status,\n )\n\n new_artifact_params = dict(\n filename=\"a_different_filename\",\n filetype=DatasetArtifactFileType.LOOM,\n type=DatasetArtifactType.ORIGINAL,\n user_submitted=False,\n s3_uri=\"a_different_uri\",\n )\n new_processing_status = BogusProcessingStatusParams.get(upload_progress=7 / 9)\n\n dataset.update(\n artifacts=[new_artifact_params],\n processing_status=new_processing_status,\n sex=[\"other\"],\n )\n self.session.expire_all()\n actual_dataset = Dataset.get(self.session, dataset.id)\n self.assertEqual(actual_dataset.artifacts[0].filename, \"a_different_filename\")\n self.assertEqual(actual_dataset.sex, [\"other\"])\n self.assertEqual(actual_dataset.processing_status.upload_progress, 7 / 9)\n\n def test__update_processing_status__ok(self):\n dataset = Dataset.get(self.session, self.uuid)\n status = {\n DbDatasetProcessingStatus.upload_progress: 0,\n DbDatasetProcessingStatus.upload_status: UploadStatus.WAITING,\n }\n\n processing_status_updater(self.session, dataset.processing_status.id, status)\n\n dataset = Dataset.get(self.session, self.uuid)\n self.assertEqual(dataset.processing_status.upload_status, UploadStatus.WAITING)\n update_dataset_processing_status_to_failed(self.uuid)\n self.session.expire_all()\n\n dataset = Dataset.get(self.session, self.uuid)\n self.assertEqual(dataset.processing_status.upload_status, UploadStatus.FAILED)\n\n def test__update_processing_status__no_dataset__ok(self):\n update_dataset_processing_status_to_failed(\"fake_uuid\")\n","sub_path":"tests/unit/backend/corpora/common/entities/datasets/test_update.py","file_name":"test_update.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"265166868","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\nn_individuos = 3\nn_generaciones = 500\nN = 50\n\nx = []\ny = []\ncolors = []\n\nfor i in range(n_generaciones):\n error = np.load('error'+str(i)+'.npy') / 12000\n color = np.random.rand(N)\n for j in range(n_individuos):\n x.append(i)\n y.append(error[len(error)-1-j])\n colors.append(color)\n\n\n\nx = np.array(x)\ny = np.array(y)\ncolors = np.array(colors)\narea = 200*np.ones(len(x)) # 0 to 15 point radii\n\nplt.scatter(x, y, c=colors, s=area,alpha=0.5)\nplt.ylim(0.05,0.25)\nplt.xlim(-5,150)\n\nplt.ylabel(\"Error cometido\")\nplt.xlabel(\"Numero de Generacion\")\nplt.title('Tuneado de parametros PID - AG')\n\nplt.show()\n","sub_path":"PYTHONSOURCE/APP/pintaGA.py","file_name":"pintaGA.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"576244647","text":"'''\n-------------------------------------------\nName: Strawbretty\n-------------------------------------------\nUsing the OLED driver and IMU\n-------------------------------------------\n'''\n\nimport pyb\nfrom pyb import LED, ADC, Pin\nfrom oled_938 import OLED_938\nfrom mpu6050 import MPU6050\n\n# Create peripheral objects\nb_led = LED(4)\nimu = MPU6050(1, False)\n\n# I2C connected to Y9, Y10, (I2C bus 2) and Y11 is reset low active\noled = OLED_938(pinout={'sda': 'Y10', 'scl': 'Y9', 'res': 'Y8'}, height=64, external_vcc=False, i2c_devid=61)\n\noled.poweron()\noled.init_display()\n\nwidth = 128\nmessage = 'Bluberry'\nlength = len(message)*6\noffset = (width-length)/2\n# Simple hello world message\noled.draw_text(int(offset),0,message) # 6x8 per letter\n\n\nwhile True:\n b_LED.toggle()\n\n pitch = imu.pitch( ) # Returns the pitch angle in degrees.\n gy_dot = imu.get_gy( ) # Returns d(pitch)/dt in degrees/sec.\n\n roll = imu.roll( ) # Returns the roll angle in degrees.\n gx_dot = imu.get_gx( ) # Returns d(roll)/dt in degrees/sec.\n\n\n oled.draw_text(0,20,'Pitch angle:{:6.3f}'.format(pitch) )\n oled.draw_text(0,40,'Pitch rate:{:6.3f}'.format(gy_dot) )\n tic = pyb.millis() # start time\n oled.display()\n pyb.delay(100) # delay by random numner of millisec\n","sub_path":"Labs/Lab_4/lab4_ex5.py","file_name":"lab4_ex5.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"398438018","text":"\"\"\"Overarching catalog object for all open catalogs.\n\"\"\"\nimport codecs\nimport importlib\nimport json\nimport os\nimport subprocess\nimport sys\nimport warnings\nfrom collections import OrderedDict\nfrom glob import glob\n\nimport psutil\nfrom astrocats import __version__\nfrom astrocats.catalog.entry import ENTRY, Entry\nfrom astrocats.catalog.source import SOURCE\nfrom astrocats.catalog.task import Task\nfrom astrocats.catalog.utils import (compress_gz, is_integer, pbar,\n read_json_dict, repo_priority,\n uncompress_gz, uniq_cdl)\nfrom git import Repo\nfrom tqdm import tqdm\n\n\nclass Catalog:\n \"\"\"Object to hold the main catalog dictionary and other catalog globals.\n\n Attributes\n ----------\n OSC_BIBCODE\n OSC_NAME\n OSC_URL\n ADS_BIB_URL\n TRAVIS_QUERY_LIMIT\n COMPRESS_ABOVE_FILESIZE\n\n \"\"\"\n\n OSC_BIBCODE = '2016arXiv160501054G'\n OSC_NAME = 'The Open Supernova Catalog'\n OSC_URL = 'https://sne.space'\n\n ADS_BIB_URL = (\"http://adsabs.harvard.edu/cgi-bin/nph-abs_connect?\"\n \"db_key=ALL&version=1&bibcode=\")\n\n TRAVIS_QUERY_LIMIT = 10\n COMPRESS_ABOVE_FILESIZE = 90e6 # bytes\n\n class PATHS:\n \"\"\"Store and control catalog file-structure information.\n\n Individual catalogs must provide the below file structure.\n - `repos.json`\n - `tasks.json`\n\n Attributes\n ----------\n catalog : `astrocats.catalog.catalog.Catalog` (sub)class object\n catalog_dir : str\n tasks_dir : str\n PATH_BASE : str\n PATH_INPUT : str\n PATH_OUTPUT : str\n REPOS_LIST : str\n TASK_LIST : str\n repos_dict : dict\n Dictionary of 'repo-types: repo-lists' key-value pairs.\n Loaded from `REPOS_LIST` file.\n\n Methods\n -------\n get_all_repo_folders : get a list of paths for all data repositories\n get_repo_boneyard : get the path of the boneyard repository\n get_repo_input_folders : get the paths of all input data repositories\n get_repo_output_file_list : get the paths of all files in output repos\n get_repo_output_folders : get the paths of all input data repositories\n\n \"\"\"\n\n def __init__(self, catalog):\n self.catalog = catalog\n this_file = sys.modules[self.__module__].__file__\n self.catalog_dir = os.path.dirname(this_file)\n self.tasks_dir = os.path.join(self.catalog_dir, 'tasks')\n self.PATH_BASE = os.path.join(\n catalog.args.base_path, self.catalog_dir, '')\n self.PATH_INPUT = os.path.join(self.PATH_BASE, 'input', '')\n self.PATH_OUTPUT = os.path.join(self.PATH_BASE, 'output', '')\n # critical datafiles\n self.REPOS_LIST = os.path.join(self.PATH_INPUT, 'repos.json')\n self.TASK_LIST = os.path.join(self.PATH_INPUT, 'tasks.json')\n self.repos_dict = read_json_dict(self.REPOS_LIST)\n return\n\n def _get_repo_file_list(self, repo_folders, normal=True, bones=True):\n \"\"\"Get filenames for files in each repository, `boneyard` optional.\n \"\"\"\n # repo_folders = get_repo_output_folders()\n files = []\n for rep in repo_folders:\n if 'boneyard' not in rep and not normal:\n continue\n if not bones and 'boneyard' in rep:\n continue\n these_files = glob(rep + \"/*.json\") + glob(rep + \"/*.json.gz\")\n self.catalog.log.debug(\"Found {} files in '{}'\".format(\n len(these_files), rep))\n files += these_files\n\n return files\n\n def get_all_repo_folders(self, boneyard=True):\n \"\"\"Get the full paths of all data repositories.\n \"\"\"\n all_repos = self.get_repo_input_folders()\n all_repos.extend(self.get_repo_output_folders(bones=boneyard))\n return all_repos\n\n def get_repo_boneyard(self):\n bone_path = self.repos_dict['boneyard']\n try:\n bone_path 
= bone_path[0]\n            except TypeError:\n                pass\n            bone_path = os.path.join(self.PATH_OUTPUT, bone_path, '')\n            return bone_path\n\n        def get_repo_input_folders(self):\n            \"\"\"Get the full paths of the input data repositories.\n            \"\"\"\n            repo_folders = []\n            repo_folders += self.repos_dict['external']\n            repo_folders += self.repos_dict['internal']\n            repo_folders = list(sorted(set(repo_folders)))\n            repo_folders = [os.path.join(self.PATH_INPUT, rf)\n                            for rf in repo_folders]\n            return repo_folders\n\n        def get_repo_output_file_list(self, normal=True, bones=True):\n            \"\"\"Get a list of all existing output files.\n\n            These are the files deleted in the `delete_old_entry_files` task.\n            \"\"\"\n            repo_folders = self.get_repo_output_folders()\n            return self._get_repo_file_list(\n                repo_folders, normal=normal, bones=bones)\n\n        def get_repo_output_folders(self, bones=True):\n            \"\"\"Get the full paths of the output data repositories.\n            \"\"\"\n            repo_folders = []\n            repo_folders += self.repos_dict['output']\n            if bones:\n                repo_folders += self.repos_dict['boneyard']\n            repo_folders = list(sorted(list(set(repo_folders)),\n                                       key=lambda key: repo_priority(key)))\n            repo_folders = [os.path.join(self.PATH_OUTPUT, rf)\n                            for rf in repo_folders]\n            return repo_folders\n\n    class SCHEMA:\n        HASH = ''\n        URL = ''\n\n    def __init__(self, args, log):\n        # Store runtime arguments\n        self.args = args\n        self.log = log\n        self.proto = Entry\n\n        # Instantiate PATHS\n        self.PATHS = self.PATHS(self)\n\n        # Load repos dictionary (required)\n        self.repos_dict = read_json_dict(self.PATHS.REPOS_LIST)\n        self.clone_repos()\n\n        # Create empty `entries` collection\n        self.entries = OrderedDict()\n        self.aliases = {}\n\n        # Only journal tasks with priorities greater than this number,\n        # unless updating.\n        self.min_journal_priority = 0\n\n        # Store version information\n        # -------------------------\n        # git `SHA` of this directory (i.e. a sub-catalog)\n        my_path = self.PATHS.catalog_dir\n        catalog_sha = subprocess.check_output(\n            [\"git\", \"rev-parse\", \"--short\", \"HEAD\"], cwd=my_path)\n        catalog_sha = catalog_sha.decode('ascii').strip()\n        # Git SHA of `astrocats`\n        parent_path = os.path.join(my_path, os.pardir)\n        astrocats_sha = subprocess.check_output(\n            [\"git\", \"rev-parse\", \"--short\", \"HEAD\"], cwd=parent_path)\n        astrocats_sha = astrocats_sha.decode('ascii').strip()\n        # Name of this class (if subclassed)\n        my_name = type(self).__name__\n        self._version_long = \"Astrocats v'{}' SHA'{}' - {} SHA'{}'\".format(\n            __version__, astrocats_sha, my_name, catalog_sha)\n\n        return\n\n    def import_data(self):\n        \"\"\"Run all of the import tasks.\n\n        This is executed by the 'scripts.main.py' when the module is run as an\n        executable. 
This can also be run as a method, in which case default\n        arguments are loaded, but can be overridden using `**kwargs`.\n        \"\"\"\n\n        tasks_list = self.load_task_list()\n        warnings.filterwarnings(\n            'ignore', r'Warning: converting a masked element to nan.')\n        # FIX\n        warnings.filterwarnings(\n            'ignore', category=DeprecationWarning)\n\n        # Delete all old (previously constructed) output files\n        if self.args.delete_old:\n            self.log.warning(\"Deleting all old entry files.\")\n            self.delete_old_entry_files()\n\n        if self.args.travis:\n            self.log.warning(\"Running in `travis` mode.\")\n\n        prev_priority = 0\n        prev_task_name = ''\n        # for task, task_obj in tasks_list.items():\n        for task_name, task_obj in tasks_list.items():\n            if not task_obj.active:\n                continue\n            self.log.warning(\"Task: '{}'\".format(task_name))\n\n            nice_name = task_obj.nice_name\n            mod_name = task_obj.module\n            func_name = task_obj.function\n            priority = task_obj.priority\n\n            # Make sure things are running in the correct order\n            if priority < prev_priority and priority > 0:\n                raise RuntimeError(\n                    \"Priority for '{}': '{}', less than prev,\"\n                    \"'{}': '{}'.\\n{}\"\n                    .format(task_name, priority, prev_task_name, prev_priority,\n                            task_obj))\n\n            self.log.debug(\"\\t{}, {}, {}, {}\".format(\n                nice_name, priority, mod_name, func_name))\n            mod = importlib.import_module('.' + mod_name, package='astrocats')\n            self.current_task = task_obj\n            getattr(mod, func_name)(self)\n\n            num_events, num_stubs = self.count()\n            self.log.warning(\"Task finished.  Events: {}, Stubs: {}\".format(\n                num_events, num_stubs))\n            self.journal_entries()\n            num_events, num_stubs = self.count()\n            self.log.warning(\"Journal finished.  Events: {}, Stubs: {}\".format(\n                num_events, num_stubs))\n\n            prev_priority = priority\n            prev_task_name = task_name\n\n        process = psutil.Process(os.getpid())\n        memory = process.memory_info().rss\n        self.log.warning('Memory used (MBs): '\n                         '{:,}'.format(memory / 1024. / 1024.))\n        return\n
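\n    # Illustrative note (added): the ordering contract enforced by\n    # load_task_list below. Tasks sort on the key (priority < 0, priority),\n    # i.e. all non-negative priorities ascending first, then the negative\n    # ones ascending, matching the example in the docstring:\n    #\n    #     >>> sorted([2, -1, 10, -10, 0], key=lambda p: (p < 0, p))\n    #     [0, 2, 10, -10, -1]\n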
\n    def load_task_list(self):\n        \"\"\"Load the list of tasks in this catalog's 'input/tasks.json' file.\n\n        A `Task` object is created for each entry, with the parameters filled\n        in. These are placed in an OrderedDict, sorted by the `priority`\n        parameter, with positive values and then negative values,\n        e.g. [0, 2, 10, -10, -1].\n        \"\"\"\n\n        # Don't allow both a 'min' and 'max' task priority\n        if ((self.args.min_task_priority is not None and\n             self.args.max_task_priority is not None)):\n            raise ValueError(\"Can only use *either* 'min' *or* 'max' priority\")\n\n        # Load tasks data from input json file\n        tasks, task_names = self._load_task_list_from_file()\n\n        # Make sure 'active' modification lists are all valid\n        args_lists = [self.args.args_task_list,\n                      self.args.yes_task_list, self.args.no_task_list]\n        args_names = ['--tasks', '--yes', '--no']\n        for arglist, lname in zip(args_lists, args_names):\n            if arglist is not None:\n                for tname in arglist:\n                    if tname not in task_names:\n                        raise ValueError(\n                            \"Value '{}' in '{}' list does not match\"\n                            \" any tasks\".format(tname, lname))\n\n        # Process min/max priority specification ('None' if none given)\n        min_priority = _get_task_priority(tasks, self.args.min_task_priority)\n        max_priority = _get_task_priority(tasks, self.args.max_task_priority)\n        task_groups = self.args.task_groups\n        if task_groups is not None:\n            if not isinstance(task_groups, list):\n                task_groups = [task_groups]\n\n        # Iterate over all tasks to determine which should be (in)active\n        # --------------------------------------------------------------\n        for key in tasks:\n            # If specific list of tasks is given, make only those active\n            if self.args.args_task_list is not None:\n                if key in self.args.args_task_list:\n                    tasks[key].active = True\n                else:\n                    tasks[key].active = False\n\n            # Only run tasks above minimum priority\n            # (doesn't modify negative-priority tasks)\n            if min_priority is not None and tasks[key].priority >= 0:\n                tasks[key].active = False\n                if tasks[key].priority >= min_priority:\n                    tasks[key].active = True\n\n            # Only run tasks below maximum priority\n            # (doesn't modify negative-priority tasks)\n            if max_priority is not None and tasks[key].priority >= 0:\n                tasks[key].active = False\n                if tasks[key].priority <= max_priority:\n                    tasks[key].active = True\n\n            # Set 'yes' tasks to *active*\n            if self.args.yes_task_list is not None:\n                if key in self.args.yes_task_list:\n                    tasks[key].active = True\n            # Set 'no' tasks to *inactive*\n            if self.args.no_task_list is not None:\n                if key in self.args.no_task_list:\n                    tasks[key].active = False\n            # Set tasks in target 'groups' to *active*\n            if task_groups is not None and tasks[key].groups is not None:\n                # Go through each group defined in the command line\n                for given_group in task_groups:\n                    # If this task is a member of any of those groups\n                    if given_group in tasks[key].groups:\n                        tasks[key].active = True\n                        break\n\n        # Sort entries as positive values, then negative values\n        #    [0, 1, 2, 2, 10, -100, -10, -1]\n        # Tuples are sorted by first element (here: '0' if positive), then\n        # second (here normal order)\n        tasks = OrderedDict(sorted(tasks.items(), key=lambda t: (\n            t[1].priority < 0, t[1].priority, t[1].name)))\n\n        # Find the first task that has \"always_journal\" set to True\n        for key in tasks:\n            if tasks[key].active and tasks[key].always_journal:\n                self.min_journal_priority = tasks[key].priority\n                break\n\n        names_act = []\n        names_inact = []\n        for key, val in tasks.items():\n            if val.active:\n                names_act.append(key)\n            else:\n                names_inact.append(key)\n\n        self.log.info(\"Active Tasks:\\n\\t\" + \", \".join(nn for nn in names_act))\n        self.log.debug(\"Inactive Tasks:\\n\\t\" +\n                       \", \".join(nn for nn in names_inact))\n        return tasks\n
task-list from '{}'\".format(def_task_list_filename))\n data = json.load(open(def_task_list_filename, 'r'))\n # Create `Task` objects for each element in the tasks data file\n tasks = {}\n task_names = []\n for key, val in data.items():\n tasks[key] = Task(name=key, **val)\n task_names.append(key)\n return tasks, task_names\n\n def save_caches(self):\n return\n\n def _clone_repos(self, all_repos):\n \"\"\"Given a list of repositories, make sure they're all cloned.\n\n Should be called from the subclassed `Catalog` objects, passed a list\n of specific repository names.\n\n Arguments\n ---------\n all_repos : list of str\n *Absolute* path specification of each target repository.\n\n \"\"\"\n for repo in all_repos:\n if not os.path.isdir(repo):\n try:\n repo_name = os.path.split(repo)[-1]\n self.log.warning(\n 'Cloning \"' + repo + '\" (only needs to be done ' +\n 'once, may take few minutes per repo).')\n Repo.clone_from(\"https://github.com/astrocatalogs/\" +\n repo_name + \".git\", repo,\n **({'depth': self.args.clone_depth} if\n self.args.clone_depth > 0 else {}))\n except:\n self.log.error(\"CLONING '{}' INTERRUPTED\".format(repo))\n raise\n\n return\n\n def clone_repos(self):\n self._clone_repos([])\n\n def git_add_commit_push_all_repos(self):\n \"\"\"Add all files in each data repository tree, commit, push.\n\n Creates a commit message based on the current catalog version info.\n\n If either the `git add` or `git push` commands fail, an error will be\n raised. Currently, if `commit` fails an error *WILL NOT* be raised\n because the `commit` command will return a nonzero exit status if\n there are no files to add... which we dont want to raise an error.\n FIX: improve the error checking on this.\n \"\"\"\n all_repos = self.PATHS.get_all_repo_folders()\n for repo in all_repos:\n self.log.warning(\"Repo in: '{}'\".format(repo))\n # Get the initial git SHA\n git_comm = \"git rev-parse HEAD {}\".format(repo)\n sha_beg = subprocess.getoutput(git_comm)\n self.log.debug(\"Current SHA: '{}'\".format(sha_beg))\n\n # Get files that should be added, compress and check sizes\n add_files = self._prep_git_add_file_list(\n repo, self.COMPRESS_ABOVE_FILESIZE)\n self.log.info(\"Found {} Files to add.\".format(len(add_files)))\n if len(add_files) == 0:\n continue\n\n try:\n # Add all files in the repository directory tree\n git_comm = [\"git\", \"add\"]\n git_comm.extend(add_files)\n _call_command_in_repo(git_comm, repo, self.log,\n fail=True, log_flag=False)\n\n # Commit these files\n commit_msg = \"'push' - adding all files.\"\n commit_msg = \"{} : {}\".format(self._version_long, commit_msg)\n self.log.info(commit_msg)\n git_comm = [\"git\", \"commit\", \"-am\", commit_msg]\n _call_command_in_repo(git_comm, repo, self.log)\n\n # Add all files in the repository directory tree\n git_comm = [\"git\", \"push\"]\n _call_command_in_repo(git_comm, repo, self.log, fail=True)\n except Exception as err:\n try:\n git_comm = [\"git\", \"reset\", \"HEAD\"]\n _call_command_in_repo(git_comm, repo, self.log, fail=True)\n except:\n pass\n\n raise err\n\n return\n\n def load_entry_from_name(self, name, delete=True, merge=True):\n loaded_entry = self.proto.init_from_file(self, name=name, merge=merge)\n if loaded_entry is not None:\n self.entries[name] = loaded_entry\n self.log.debug(\n \"Added '{}', from '{}', to `self.entries`\".format(\n name, loaded_entry.filename))\n # Delete source file, if desired\n if delete:\n self._delete_entry_file(entry=loaded_entry)\n return name\n return None\n\n def add_entry(self, name, 
load=True, delete=True):\n \"\"\"Find an existing entry in, or add a new one to, the `entries` dict.\n\n FIX: rename to `create_entry`???\n\n Returns\n -------\n entries : OrderedDict of Entry objects\n newname : str\n Name of matching entry found in `entries`, or new entry added to\n `entries`\n \"\"\"\n newname = self.clean_entry_name(name)\n # If entry already exists, return\n if newname in self.entries:\n self.log.debug(\n \"`newname`: '{}' (name: '{}') already exists.\".\n format(newname, name))\n # If this is a stub, we need to continue, possibly load file\n if self.entries[newname]._stub:\n self.log.debug(\"'{}' is a stub\".format(newname))\n # If a full (non-stub) event exists, return its name\n else:\n self.log.debug(\"'{}' is not a stub, returning\".format(newname))\n return newname\n\n # If entry is alias of another entry in `entries`, find and return that\n match_name = self.find_entry_name_of_alias(newname)\n if match_name is not None:\n self.log.debug(\n \"`newname`: '{}' (name: '{}') already exists as alias for \"\n \"'{}'.\".format(newname, name, match_name))\n newname = match_name\n\n # Load entry from file\n if load:\n loaded_name = self.load_entry_from_name(newname, delete=delete)\n if loaded_name:\n return loaded_name\n\n # If we match an existing event, return that\n if match_name is not None:\n return match_name\n\n # Create new entry\n new_entry = self.proto(self, newname)\n new_entry[self.proto._KEYS.SCHEMA] = self.SCHEMA.URL\n self.log.log(self.log._LOADED,\n \"Created new entry for '{}'\".format(newname))\n # Add entry to dictionary\n self.entries[newname] = new_entry\n return newname\n\n def delete_old_entry_files(self):\n if len(self.entries):\n err_str = \"`delete_old_entry_files` with `entries` not empty!\"\n self.log.error(err_str)\n raise RuntimeError(err_str)\n # Delete all old entry JSON files\n repo_files = self.PATHS.get_repo_output_file_list()\n for rfil in pbar(repo_files, desc='Deleting old entries'):\n os.remove(rfil)\n self.log.debug(\"Deleted '{}'\".format(os.path.split(rfil)[-1]))\n return\n\n def get_preferred_name(self, name):\n if name not in self.entries:\n # matches = []\n for entry in self.entries:\n aliases = self.entries[entry].get_aliases(includename=False)\n if len(aliases) > 1 and name in aliases:\n return entry\n return name\n else:\n return name\n\n def find_entry_name_of_alias(self, alias):\n \"\"\"Return the first entry name with the given 'alias' included in its\n list of aliases.\n\n Returns\n -------\n name of matching entry (str) or 'None' if no matches\n\n \"\"\"\n if alias in self.aliases:\n name = self.aliases[alias]\n if name in self.entries:\n return name\n else:\n # Name wasn't found, possibly merged or deleted. 
Now look\n # really hard.\n for name, entry in self.entries.items():\n aliases = entry.get_aliases(includename=False)\n if alias in aliases:\n if ((ENTRY.DISTINCT_FROM not in entry) or\n (alias not in entry[ENTRY.DISTINCT_FROM])):\n return name\n\n return None\n\n def copy_to_entry_in_catalog(self, fromname, destname):\n self.copy_entry_to_entry(self.entries[fromname],\n self.entries[destname])\n\n def copy_entry_to_entry(self, fromentry, destentry):\n \"\"\"\n\n Used by `merge_duplicates`\n \"\"\"\n self.log.info(\"Copy entry object '{}' to '{}'\"\n .format(fromentry[fromentry._KEYS.NAME],\n destentry[destentry._KEYS.NAME]))\n newsourcealiases = {}\n\n if self.proto._KEYS.SOURCES in fromentry:\n for source in fromentry[self.proto._KEYS.SOURCES]:\n alias = source.pop(SOURCE.ALIAS)\n newsourcealiases[alias] = source\n\n if self.proto._KEYS.ERRORS in fromentry:\n for err in fromentry[self.proto._KEYS.ERRORS]:\n destentry.setdefault(\n self.proto._KEYS.ERRORS, []).append(err)\n\n for key in fromentry:\n if fromentry._KEYS.get_key_by_name(key).no_source:\n continue\n for item in fromentry[key]:\n # isd = False\n if 'source' not in item:\n raise ValueError(\"Item has no source!\")\n\n nsid = []\n for sid in item['source'].split(','):\n if sid in newsourcealiases:\n source = newsourcealiases[sid]\n nsid.append(destentry\n .add_source(**source))\n else:\n raise ValueError(\"Couldn't find source alias!\")\n\n item['source'] = uniq_cdl(nsid)\n\n if key == ENTRY.PHOTOMETRY:\n destentry.add_photometry(**item)\n elif key == ENTRY.SPECTRA:\n destentry.add_spectrum(**item)\n elif key == ENTRY.ERRORS:\n destentry.add_error(**item)\n else:\n destentry.add_quantity(check_for_dupes=False, quantity=key,\n **item)\n\n return\n\n def clean_entry_name(self, name):\n \"\"\"Template method to clean/sanitize an entry name before setting it.\n\n Should be overridden appropriately in subclassed `Catalog` objects.\n \"\"\"\n return name\n\n def new_entry(self, name, load=True, delete=True,\n loadifempty=True, srcname='', reference='', url='',\n bibcode='', secondary=False, acknowledgment=''):\n newname = self.add_entry(name, load=load, delete=delete)\n source = self.entries[newname].add_source(\n bibcode=bibcode, name=srcname, reference=reference, url=url,\n secondary=secondary, acknowledgment=acknowledgment)\n self.entries[newname].add_quantity(ENTRY.ALIAS, name, source)\n return newname, source\n\n def merge_duplicates(self):\n \"\"\"Merge and remove duplicate entries.\n\n Compares each entry ('name') in `stubs` to all later entries to check\n for duplicates in name or alias. 
If a duplicate is found, they are\n merged and written to file.\n \"\"\"\n if len(self.entries) == 0:\n self.log.error(\"WARNING: `entries` is empty, loading stubs\")\n if self.args.update:\n self.log.warning(\n \"No sources changed, entry files unchanged in update.\"\n \" Skipping merge.\")\n return\n self.entries = self.load_stubs()\n\n task_str = self.get_current_task_str()\n\n keys = list(sorted(self.entries.keys()))\n n1 = 0\n mainpbar = tqdm(total=len(keys), desc=task_str)\n while n1 < len(keys):\n name1 = keys[n1]\n if name1 not in self.entries:\n self.log.info(\"Entry for {} not found, likely already \"\n \"deleted in merging process.\".format(name1))\n n1 = n1 + 1\n mainpbar.update(1)\n continue\n allnames1 = set(self.entries[name1].get_aliases() +\n self.entries[name1].extra_aliases())\n\n # Search all later names\n for name2 in keys[n1 + 1:]:\n if name1 == name2:\n continue\n if name1 not in self.entries:\n self.log.info(\"Entry for {} not found, likely already \"\n \"deleted in merging process.\".format(name1))\n continue\n if name2 not in self.entries:\n self.log.info(\"Entry for {} not found, likely already \"\n \"deleted in merging process.\".format(name2))\n continue\n\n allnames2 = set(self.entries[name2].get_aliases() +\n self.entries[name2].extra_aliases())\n\n # If there are any common names or aliases, merge\n if len(allnames1 & allnames2):\n self.log.warning(\n \"Found two entries with common aliases \"\n \"('{}' and '{}'), merging.\".format(name1, name2))\n\n load1 = self.proto.init_from_file(\n self, name=name1)\n load2 = self.proto.init_from_file(\n self, name=name2)\n if load1 is not None and load2 is not None:\n # Delete old files\n self._delete_entry_file(entry=load1)\n self._delete_entry_file(entry=load2)\n self.entries[name1] = load1\n self.entries[name2] = load2\n priority1 = 0\n priority2 = 0\n for an in allnames1:\n if an.startswith(self.entries[name1]\n .priority_prefixes()):\n priority1 += 1\n for an in allnames2:\n if an.startswith(self.entries[name2]\n .priority_prefixes()):\n priority2 += 1\n\n if priority1 > priority2:\n self.copy_to_entry_in_catalog(name2, name1)\n keys.append(name1)\n del self.entries[name2]\n else:\n self.copy_to_entry_in_catalog(name1, name2)\n keys.append(name2)\n del self.entries[name1]\n else:\n self.log.warning('Duplicate already deleted')\n\n # if len(self.entries) != 1:\n # self.log.error(\n # \"WARNING: len(entries) = {}, expected 1. 
\"\n # \"Still journaling...\".format(len(self.entries)))\n self.journal_entries()\n\n if self.args.travis and n1 > self.TRAVIS_QUERY_LIMIT:\n break\n n1 = n1 + 1\n mainpbar.update(1)\n mainpbar.close()\n\n def sanitize(self):\n task_str = self.get_current_task_str()\n for name in pbar(list(sorted(self.entries.keys())), task_str):\n self.add_entry(name)\n self.journal_entries(bury=True, final=True)\n\n def load_stubs(self):\n \"\"\"\n \"\"\"\n currenttask = 'Loading entry stubs'\n files = self.PATHS.get_repo_output_file_list()\n for fi in pbar(files, currenttask):\n fname = fi\n # FIX: should this be ``fi.endswith(``.gz')`` ?\n if '.gz' in fi:\n fname = uncompress_gz(fi)\n name = os.path.basename(\n os.path.splitext(fname)[0]).replace('.json', '')\n new_entry = self.proto.init_from_file(\n self, path=fname, delete=False)\n # Make sure a non-stub entry doesnt already exist with this name\n if name in self.entries and not self.entries[name]._stub:\n err_str = (\n \"ERROR: non-stub entry already exists with name '{}'\"\n .format(name))\n self.log.error(err_str)\n raise RuntimeError(err_str)\n\n self.entries[name] = new_entry.get_stub()\n self.log.debug(\"Added stub for '{}'\".format(name))\n\n return self.entries\n\n def _delete_entry_file(self, entry_name=None, entry=None):\n \"\"\"Delete the file associated with the given entry.\n \"\"\"\n if entry_name is None and entry is None:\n raise RuntimeError(\"Either `entry_name` or `entry` must be given.\")\n elif entry_name is not None and entry is not None:\n raise RuntimeError(\"Cannot use both `entry_name` and `entry`.\")\n\n if entry_name is not None:\n entry = self.entries[entry_name]\n else:\n entry_name = entry[ENTRY.NAME]\n\n outdir, filename = entry._get_save_path()\n # FIX: do we also need to check for gzipped files??\n entry_filename = os.path.join(outdir, filename + '.json')\n # entry_filename = entry.filename\n\n if self.args.write_entries:\n self.log.info(\"Deleting entry file '{}' of entry '{}'\".format(\n entry_filename, entry_name))\n if not os.path.exists(entry_filename):\n self.log.error(\"Filename '{}' does not exist\".format(\n entry_filename))\n os.remove(entry_filename)\n else:\n self.log.debug(\"Not deleting '{}' because `write_entries`\"\n \" is False\".format(entry_filename))\n\n return\n\n def should_bury(self, name):\n return (False, True)\n\n def journal_entries(self, clear=True, gz=False, bury=False,\n write_stubs=False, final=False):\n \"\"\"Write all entries in `entries` to files, and clear. 
Depending on\n arguments and `tasks`.\n\n Iterates over all elements of `entries`, saving (possibly 'burying')\n and deleting.\n - If ``clear == True``, then each element of `entries` is deleted,\n and a `stubs` entry is added\n \"\"\"\n\n # if (self.current_task.priority >= 0 and\n # self.current_task.priority < self.min_journal_priority):\n # return\n\n # Write it all out!\n # NOTE: this needs to use a `list` wrapper to allow modification of\n # dict\n for name in list(self.entries.keys()):\n if self.args.write_entries:\n # If this is a stub and we aren't writing stubs, skip\n if self.entries[name]._stub and not write_stubs:\n continue\n\n # Bury non-SN entries here if only claimed type is non-SN type,\n # or if primary name starts with a non-SN prefix.\n bury_entry = False\n save_entry = True\n if bury:\n (bury_entry, save_entry) = self.should_bury(name)\n\n if save_entry:\n save_name = self.entries[name].save(bury=bury_entry,\n final=final)\n self.log.info(\n \"Saved {} to '{}'.\".format(name.ljust(20), save_name))\n if (gz and os.path.getsize(save_name) >\n self.COMPRESS_ABOVE_FILESIZE):\n save_name = compress_gz(save_name)\n self.log.debug(\"Compressed '{}' to '{}'\".format(\n name, save_name))\n # FIX: use subprocess\n outdir, filename = os.path.split(save_name)\n filename = filename.split('.')[0]\n os.system('cd ' + outdir + '; git rm --cached ' +\n filename +\n '.json; git add -f ' + filename +\n '.json.gz; cd ' + self.PATHS.PATH_BASE)\n\n if clear:\n self.entries[name] = self.entries[name].get_stub()\n self.log.debug(\"Entry for '{}' converted to stub\".format(name))\n\n return\n\n def entry_exists(self, name):\n if name in self.entries:\n return True\n for ev in self.entries:\n if name in self.entries[ev].get_aliases(includename=False):\n return True\n return False\n\n def count(self):\n full = 0\n stub = 0\n for ev in self.entries:\n if self.entries[ev]._stub:\n stub += 1\n else:\n full += 1\n return full, stub\n\n def get_current_task_str(self):\n \"\"\"Get a string describing the current task the catalog is working on.\n \"\"\"\n return self.current_task.current_task(self.args)\n\n def get_current_task_repo(self):\n \"\"\"Get the data repository corresponding to the currently active task.\n \"\"\"\n return self.current_task._get_repo_path(self.PATHS.PATH_BASE)\n\n def set_preferred_names(self):\n \"\"\"Choose between each entries given name and its possible aliases for\n the best one.\n \"\"\"\n if len(self.entries) == 0:\n self.log.error(\"WARNING: `entries` is empty, loading stubs\")\n self.load_stubs()\n\n task_str = self.get_current_task_str()\n for ni, oname in enumerate(pbar(self.entries, task_str)):\n name = self.add_entry(oname)\n self.entries[name].set_preferred_name()\n\n if self.args.travis and ni > self.TRAVIS_QUERY_LIMIT:\n break\n\n return\n\n def load_cached_url(self, url, filepath, timeout=120, write=True,\n failhard=False, jsonsort=''):\n from hashlib import md5\n filemd5 = ''\n file_txt = ''\n # Load existing, cached copy of online data file\n if not self.args.refresh and os.path.isfile(filepath):\n with codecs.open(filepath, 'r', encoding='utf8') as f:\n file_txt = f.read()\n self.log.debug(\"{}: Loaded `file_txt` from '{}'.\".format(\n self.current_task, filepath))\n if self.args.update:\n filemd5 = md5(file_txt.encode('utf-8')).hexdigest()\n\n # Try to download new copy of online data\n try:\n import requests\n session = requests.Session()\n response = session.get(url, timeout=timeout)\n response.raise_for_status()\n # Look for errors\n for x in 
response.history:\n x.raise_for_status()\n if (x.status_code == 500 or x.status_code == 307 or\n x.status_code == 404):\n raise\n url_txt = response.text\n self.log.debug(\"{}: Loaded `url_txt` from '{}'.\".format(\n self.current_task, url))\n newmd5 = md5(url_txt.encode('utf-8')).hexdigest()\n # tprint(filemd5 + \": \" + newmd5)\n # Check if cached file and newly downloaded file are the same\n # If so: no need to resave it, return\n if self.args.update and newmd5 == filemd5:\n self.log.debug(\n 'Skipping file in \"' + self.current_task +\n ',\" local and remote copies identical [' + newmd5 + '].')\n return False\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n if failhard:\n return ''\n return file_txt\n else:\n # Write the newly downloaded data to the cache save file.\n if write:\n wtxt = url_txt if url_txt else file_txt\n if jsonsort and '.json' in filepath:\n jdict = json.loads(wtxt)\n wtxt = json.dumps(\n list(sorted(jdict, key=lambda kk: kk[jsonsort])),\n indent=4, separators=(',', ': '))\n with codecs.open(filepath, 'w', encoding='utf8') as f:\n f.write(wtxt)\n self.log.debug(\"{}: wrote txt to '{}'.\".format(\n self.current_task, filepath))\n\n return url_txt\n\n def _prep_git_add_file_list(self, repo, size_limit,\n fail=True, file_types=None):\n \"\"\"Get a list of files which should be added to the given repository.\n\n Notes\n -----\n * Finds files in the *root* of the given repository path.\n * If `file_types` is given, only use those file types.\n * If an uncompressed file is above the `size_limit`, it is compressed.\n * If a compressed file is above the file limit, an error is raised\n (if `fail == True`) or it is skipped (if `fail == False`).\n\n Arguments\n ---------\n repo : str\n Path to repository\n size_limit : scalar\n Maximum allowed file size in bytes.\n fail : bool\n Raise an error if a compressed file is still above the size limit.\n file_types : list of str or None\n Exclusive list of file types to add. 'None' to add all filetypes.\n\n \"\"\"\n add_files = []\n if file_types is None:\n file_patterns = ['*']\n else:\n self.log.error(\n \"WARNING: uncertain behavior with specified file types!\")\n file_patterns = ['*.' + ft for ft in file_types]\n\n # Construct glob patterns for each file-type\n file_patterns = [os.path.join(repo, fp) for fp in file_patterns]\n for pattern in file_patterns:\n file_list = glob(pattern)\n for ff in file_list:\n fsize = os.path.getsize(ff)\n fname = str(ff)\n comp_failed = False\n # If the found file is too large\n if fsize > size_limit:\n self.log.debug(\"File '{}' size '{}' MB.\".format(\n fname, fsize/1024/1024))\n # If the file is already compressed...
fail or skip\n if ff.endswith('.gz'):\n self.log.error(\n \"File '{}' is already compressed.\".format(fname))\n comp_failed = True\n # Not yet compressed - compress it\n else:\n fname = compress_gz(fname)\n fsize = os.path.getsize(fname)\n self.log.info(\n \"Compressed to '{}', size '{}' MB\".format(\n fname, fsize/1024/1024))\n # If still too big, fail or skip\n if fsize > size_limit:\n comp_failed = True\n\n # If compressed file is too large, skip file or raise error\n if comp_failed:\n # Raise an error\n if fail:\n raise RuntimeError(\n \"File '{}' cannot be added!\".format(fname))\n # Skip file without adding it\n self.log.info(\"Skipping file.\")\n continue\n\n # If everything is good, add file to list\n add_files.append(fname)\n\n return add_files\n\n\ndef _get_task_priority(tasks, task_priority):\n \"\"\"Get the task `priority` corresponding to the given `task_priority`.\n\n If `task_priority` is an integer or 'None', return it.\n If `task_priority` is a str, return the priority of the task it matches.\n Otherwise, raise `ValueError`.\n \"\"\"\n if task_priority is None:\n return None\n if is_integer(task_priority):\n return task_priority\n if isinstance(task_priority, str):\n if task_priority in tasks:\n return tasks[task_priority].priority\n\n raise ValueError(\"Unrecognized task priority '{}'\".format(task_priority))\n\n\ndef _call_command_in_repo(comm, repo, log, fail=False, log_flag=True):\n \"\"\"Use `subprocess` to call a command in a certain (repo) directory.\n\n Logs the output (both `stderr` and `stdout`) to the log, and checks the\n return codes to make sure they're valid. Raises error if not.\n\n Raises\n ------\n exception `subprocess.CalledProcessError`: if the command fails\n\n \"\"\"\n if log_flag:\n log.debug(\"Running '{}'.\".format(\" \".join(comm)))\n retval = subprocess.run(comm, cwd=repo, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if retval.stderr is not None:\n err_msg = retval.stderr.decode('ascii').strip().splitlines()\n for em in err_msg:\n log.error(em)\n if retval.stdout is not None:\n out_msg = retval.stdout.decode('ascii').strip().splitlines()\n for om in out_msg:\n log.info(om)\n # Raises an error if the command failed.\n if fail:\n retval.check_returncode()\n return\n","sub_path":"astrocats/catalog/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":44107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
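The `os.system` git call in `journal_entries` above is flagged with "FIX: use subprocess"; a minimal sketch of that fix, reusing the module's own `_call_command_in_repo` helper. The wrapper name `_swap_json_for_gz_in_git` is illustrative and not part of the original file; `outdir`, `filename`, and `log` are assumed to be the same values the original call site already has in scope.

def _swap_json_for_gz_in_git(outdir, filename, log):
    # Stop tracking the uncompressed JSON file (tolerate it not being tracked),
    # then force-add the gzipped copy, both run inside the repo directory.
    _call_command_in_repo(
        ['git', 'rm', '--cached', filename + '.json'], outdir, log, fail=False)
    _call_command_in_repo(
        ['git', 'add', '-f', filename + '.json.gz'], outdir, log, fail=True)

Besides avoiding shell quoting pitfalls in `outdir`, this also drops the `cd ... ; cd` round trip, since `subprocess.run(..., cwd=repo)` never changes the parent process's working directory.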
+{"seq_id":"111272037","text":"from collections import defaultdict\n\nclass Graph:\n def __init__(self):\n self.edges=defaultdict(list)\n\n def addedge(self,src,dest):\n self.edges[src].append(dest)\n\n \nG=Graph()\nG.addedge(\"a\",\"b\")\nG.addedge(\"b\",\"c\")\nG.addedge(\"c\",\"a\")\n\nprint(G.edges)","sub_path":"Python DS & ALGO/graphs/ctcigraphs.py","file_name":"ctcigraphs.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"608598842","text":"#!/usr/bin/python2\n\n# Jiska Classen, Secure Mobile Networking Lab\n\nimport os\nimport sys\nfrom argparse import Namespace\n\nimport numpy as np\nfrom pwnlib.asm import asm\n\nimport internalblue.hci as hci\nfrom internalblue.cli import InternalBlueCLI\nfrom internalblue.hcicore import HCICore\n\n\"\"\"\nMeasure the RNG of the Raspberry Pi 3.\nSimilar to matedealer's thesis, p. 51.\n\nChanges:\n\n* Every 5th byte is now 0x42 to ensure that no other process wrote\n into this memory region in the meantime. Does it job and cheaper\n than checksums.\n\n* When we are done, we send an HCI event containing 'RAND'. We catch\n this with a callback. Way more efficient than polling.\n\n* We overwrite the original `rbg_rand` function with `bx lr` to\n ensure we're the only ones accessing the RNG.\n \n* Disable Wi-Fi as the RNG might be shared.\n\n\"\"\"\n\nASM_LOCATION_RNG = 0x21f000 # load our snippet here, yes we have space :)\nMEM_RNG = ASM_LOCATION_RNG + 0xf0 # store results here\nMEM_ROUNDS = 0x1000 # run this often (x5 bytes)\nFUN_RNG = 0x6672A # original RNG function that we overwrite with bx lr\nPRAND = 0x318088 # the pseudo random register we want to benchmark\n# 0x318088 dc_nbtc_clk_adr\n# 0x32A004 timer1value_adr\n# 0x3186A0 dc_fhout_adr\n# 0x31FC34 agcStatus_adr\n# 0x31FFA0 rxInitAngle_adr\n# 0x31F8A4 spurFreqErr1_adr\n# 0x31FD48 rxPskPhErr5_adr\n# 0x200480 *mm_top TODO needs special memcpy but is only used once for init\n\n\nASM_SNIPPET_RNG = \"\"\"\n\n // use r0-r7 locally\n push {r0-r7, lr}\n \n // enter RNG dumping mode\n ldr r0, =0x%x // run this many rounds\n ldr r1, =0x%x // dst: store RNG data here\n bl dump_pseudo\n \n // done, let's notify\n //bl notify_hci\n mov r0, 0\n mov r1, 0\n mov r2, 0\n mov r3, 0\n bl 0x1a14 //ok whatever this one produces 2e0000000000000000000000000000000000000000 \n \n // back to lr\n pop {r0-r7, pc}\n \n \n //// the main RNG dumping routine\n dump_pseudo:\n \n // dst is in r1, dump RNG value here\n ldr r2, =0x%x\n ldr r3, [r2]\n str r3, [r1]\n add r1, 4 \n \n // add a test byte to ensure that no other process wrote here\n mov r3, 0x42\n str r3, [r1]\n add r1, 1\n \n // loop for rounds in r0\n subs r0, 1\n bne dump_pseudo\n bx lr\n \n \n \n //// issue an HCI event once we're done\n notify_hci:\n \n push {r0-r4, lr}\n\n // allocate vendor specific hci event\n mov r2, 10 // event length\n mov r0, 12 // event length (+2)\n mov r1, 0xff // type: vendor specific\n bl 0x2770 // bthci_event_AllocateEventAndFillHeader (the r0+r2 variant)\n mov r4, r0 // save pointer to the buffer in r4\n\n // append buffer with \"RAND\"\n add r0, 2 // buffer starts at 2 with data (?)\n ldr r1, =0x444e4152 // RAND\n str r1, [r0]\n add r0, 4 // advance buffer by 4\n\n // send hci event\n mov r0, r4 // back to buffer at offset 0\n\n pop {r0-r4, lr}\n b 0x268E // send_hci_event_without_free()\n \n \n\"\"\" % (MEM_ROUNDS, MEM_RNG, PRAND)\n\ninternalblue = HCICore()\ninternalblue.interface = internalblue.device_list()[0][1] # just use the first device\n\n# setup sockets\nif not internalblue.connect():\n internalblue.logger.critical(\"No connection to target device.\")\n exit(-1)\n\ninternalblue.logger.info(\"installing assembly patches...\")\n\n# Install the RNG code in RAM\ncode = asm(ASM_SNIPPET_RNG, vma=ASM_LOCATION_RNG)\nif not internalblue.writeMem(address=ASM_LOCATION_RNG, data=code, progress_log=None):\n internalblue.logger.critical(\"error!\")\n exit(-1)\n\n# Disable original RNG\npatch = asm(\"bx lr; bx lr\", vma=FUN_RNG) # 2 times bx lr is 4 
bytes and we can only patch 4 bytes\nif not internalblue.patchRom(FUN_RNG, patch):\n internalblue.logger.critical(\"Could not disable original RNG!\")\n exit(-1)\n\ninternalblue.logger.info(\"Installed all RNG hooks.\")\nos.system(\"sudo rfkill block wifi\")\ninternalblue.logger.info(\"Disabled Wi-Fi core.\")\n\n\"\"\"\nWe cannot call HCI Read_RAM from this callback as it requires another callback (something goes wrong here),\nso we cannot solve this recursively but need some global status variable. Still, polling this is way faster\nthan polling a status register in the Bluetooth firmware itself.\n\"\"\"\n# global status\ninternalblue.rnd_done = False\n\n\ndef rngStatusCallback(record):\n hcipkt = record[0] # get HCI Event packet\n\n if not issubclass(hcipkt.__class__, hci.HCI_Event):\n return\n\n if hcipkt.data[0:21] == b'\\x2e\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00':\n internalblue.logger.debug(\"Random data done!\")\n internalblue.rnd_done = True\n\n\n# add RNG callback\ninternalblue.registerHciCallback(rngStatusCallback)\n\n# read for multiple rounds to get more experiment data\nrounds = 100\ni = 0\ndata = bytearray()\nwhile rounds > i:\n internalblue.logger.info(\"RNG round %i...\" % i)\n\n # launch assembly snippet\n internalblue.launchRam(ASM_LOCATION_RNG)\n\n # wait until we set the global variable that everything is done\n while not internalblue.rnd_done:\n continue\n internalblue.rnd_done = False\n\n # and now read and save the random\n random = internalblue.readMem(MEM_RNG, MEM_ROUNDS * 5)\n data.extend(random)\n\n i = i + 1\n\ninternalblue.logger.info(\"Finished acquiring random data!\")\n\n# every 5th byte i 0x42\ncheck = data[4::5]\nfor c in check:\n if c != 0x42:\n internalblue.logger.error(\"Data was corrupted by another process!\")\n\n# uhm and for deleting every 5th let's take numpy (oh why??)\ndata = np.delete(data, np.arange(4, data.__len__(), 5))\n\nf = open(\"raspi3p_randomdata_pseudo-%irounds-reg0x%x.bin\" % (rounds, PRAND), \"wb\")\nf.write(data)\nf.close()\n\ninternalblue.logger.info(\"--------------------\")\ninternalblue.logger.info(\"Entering InternalBlue CLI to interpret RNG.\")\n\n# enter CLI\ncli = InternalBlueCLI(Namespace(data_directory=None, verbose=False, trace=None, save=None), internalblue)\nsys.exit(cli.cmdloop())\n","sub_path":"examples/rpi3p_rpi4/randp.py","file_name":"randp.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
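The script above only strips the 0x42 guard bytes and writes the raw dump for later interpretation. A quick first-pass sanity check one could run on the resulting file (file name follows the script's output pattern for rounds=100 and PRAND=0x318088; the check itself is an addition, not part of the original). Uniformly random bytes should show close to 8 bits of Shannon entropy per byte; clearly lower values hint at a weak or correlated source.

import math
from collections import Counter

with open("raspi3p_randomdata_pseudo-100rounds-reg0x318088.bin", "rb") as f:
    data = f.read()

# Empirical byte-frequency entropy estimate in bits per byte.
counts = Counter(data)
entropy = -sum((n / len(data)) * math.log2(n / len(data)) for n in counts.values())
print("bytes: %d, entropy: %.3f bits/byte" % (len(data), entropy))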
+{"seq_id":"409806696","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom jd_assistant import Assistant\n\nif __name__ == '__main__':\n \"\"\"\n 重要提示:此处为示例代码之一,请移步下面的链接查看使用教程👇\n https://github.com/tychxn/jd-assistant/wiki/1.-%E4%BA%AC%E4%B8%9C%E6%8A%A2%E8%B4%AD%E5%8A%A9%E6%89%8B%E7%94%A8%E6%B3%95\n\n 定时预约功能\n https://github.com/tychxn/jd-assistant/pull/109\n \"\"\"\n\n # sku_ids 商品id\n # sku_ids = '100011521400' # 振德 (ZHENDE) 口罩一次性医用口罩 预约21点 抢购10点 数量3000\n # sku_ids = '100011551632' # 3Q医用口罩 预约15点 抢购20点 数量10000\n # sku_ids = '65708238590' # 袋鼠医生 预约15点 抢购20点 数量??\n\n sku_ids = '100011521400'\n area = '12_904_905_50601' # 区域id\n reserve_time = '2020-02-29 21:00:02.500' # 预约时间\n buy_time = '2020-02-29 10:00:00.800' # 抢购时间\n\n asst = Assistant() # 初始化\n asst.login_by_QRcode() # 扫码登陆\n\n # 1.预约商品\n # asst.make_reserve(sku_id=sku_ids, buy_time=reserve_time)\n # 2个参数\n # sku_id: 商品id\n # buy_time: 预约时间,例如:'2019-11-10 22:41:30.000'\n\n # 2.定时抢购商品(可以加入购物车)\n asst.exec_reserve_seckill_by_time(sku_id=sku_ids, buy_time=buy_time, retry=2, interval=2, num=1)\n # 5个参数\n # sku_id: 商品id\n # buy_time: 抢购时间,例如:'2019-11-10 22:41:30.000'\n # retry: 抢购重复执行次数,可选参数,默认4次\n # interval: 抢购执行间隔,可选参数,默认4秒\n # num: 购买数量,可选参数,默认1个\n\n # 3.定时抢购商品(不可加入购物车)\n # asst.exec_seckill_by_time(sku_ids=sku_ids, buy_time=buy_time, retry=2, interval=2, num=1)\n # 5个参数\n # sku_ids: 商品id,多个商品id用逗号进行分割,如\"123,456,789\"\n # buy_time: 下单时间,例如:'2018-09-28 22:45:50.000'\n # retry: 抢购重复执行次数,可选参数,默认4次\n # interval: 抢购执行间隔,可选参数,默认4秒\n # num: 购买数量,可选参数,默认1个\n","sub_path":"reserve.py","file_name":"reserve.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"9347993","text":"#!/usr/bin/python3\n\n# 6502 NES disassembler Version May 10, 2018\n# for assembly with asm6\n# Doug Fraker 2017-2018\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of \n# this software and associated documentation files (the \"Software\"), to deal in the \n# Software without restriction, including without limitation the rights to use, copy, \n# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, \n# and to permit persons to whom the Software is furnished to do so, subject to the \n# following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all \n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, \n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A \n# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF \n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport sys\nimport os\n\nfrom .mapper import MAPPER\n\nclass States(object):\n '''\n Clase para almacenar variables\n #st.count within the small bank\n '''\n def __init__(self, count=0, bankSize=16384, workArray='', \n workArraySmall='', currentBank=''):\n self.count = count\n self.bankSize = bankSize\n self.workArray = workArray\n self.workArraySmall = workArraySmall\n self.currentBank = currentBank\n\n\n# define some functions\n\ndef ToASM(st, byte1,byte2,byte3):\n '''\n st: estados del programa\n byte1: comando\n byte2: arg 1\n byte2: arg 2\n '''\n \n kount2 = 0\n \n if byte1 == \"00\":\n return (\"\\tbrk\\t\\t\\t\\t; 00\") # none\n \n elif byte1 == \"01\":\n st.count += 1\n return (\"\\tora ($\" + byte2 + \", x)\\t; 01 \" + byte2) # (Indirect,X)\n \n elif byte1 == \"05\":\n st.count += 1\n return (\"\\tora $\" + byte2 + \"\\t\\t\\t; 05 \" + byte2) # Zeropage\n \n elif byte1 == \"06\":\n st.count += 1\n return (\"\\tasl $\" + byte2 + \"\\t\\t\\t; 06 \" + byte2) # Zeropage\n \n elif byte1 == \"08\":\n return (\"\\tphp\\t\\t\\t\\t; 08 \") # none\n \n elif byte1 == \"09\":\n st.count += 1\n return (\"\\tora #$\" + byte2 + \"\\t\\t; 09 \" + byte2) # immediate\n \n elif byte1 == \"0a\":\n return (\"\\tasl a\\t\\t\\t; 0a\") # A\n\n elif byte1 == \"0d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 0d \"+byte2+\" \"+byte3)\n return (\"\\tora $\" + byte3 + byte2 + \"\\t\\t; 0d \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"0e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 0e \"+byte2+\" \"+byte3)\n return (\"\\tasl $\" + byte3 + byte2 + \"\\t\\t; 0e \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"10\":\n y = int(byte2, 16)\n if y > 127:\n y -= 256\n kount2 = st.count + y + 2\n z = str(hex(kount2))\n z = z[2:] \n z = z.zfill(4)\n \n st.count += 1\n return (\"\\tbpl B\" +st.currentBank+\"_\"+ z + \" ; 10 \" + byte2) # Relative\n \n elif byte1 == \"11\":\n st.count += 1\n \n return (\"\\tora ($\" + byte2 + \"), y\\t; 11 \" + byte2) # (Indirect),Y\n \n elif byte1 == \"15\":\n st.count += 1\n \n return (\"\\tora $\" + byte2 + \", x\\t\\t; 15 \" + byte2) # Zeropage, x\n \n elif byte1 == \"16\":\n st.count += 1\n \n return (\"\\tasl $\" + byte2 + \", x\\t\\t; 16 \" + byte2) # Zeropage, x\n \n elif byte1 == \"18\":\n return 
(\"\\tclc\\t\\t\\t\\t; 18 \") # none\n \n elif byte1 == \"19\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 19 \"+byte2+\" \"+byte3)\n return (\"\\tora $\" + byte3 + byte2 + \", y\\t; 19 \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"1d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 1d \"+byte2+\" \"+byte3)\n return (\"\\tora $\" + byte3 + byte2 + \", x\\t; 1d \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"1e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 1e \"+byte2+\" \"+byte3)\n return (\"\\tasl $\" + byte3 + byte2 + \", x\\t; 1e \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"20\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 20 \"+byte2+\" \"+byte3)\n return (\"\\tjsr $\" + byte3 + byte2 + \"\\t\\t; 20 \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"21\":\n st.count += 1\n \n return (\"\\tand ($\" + byte2 + \", x)\\t; 21 \" + byte2) # (Indirect,X)\n \n elif byte1 == \"24\":\n st.count += 1\n \n return (\"\\tbit $\" + byte2 + \"\\t\\t\\t; 24 \" + byte2) # Zeropage\n \n elif byte1 == \"25\":\n st.count += 1\n \n return (\"\\tand $\" + byte2 + \"\\t\\t\\t; 25 \" + byte2) # Zeropage\n \n elif byte1 == \"26\":\n st.count += 1\n \n return (\"\\trol $\" + byte2 + \"\\t\\t\\t; 26 \" + byte2) # Zeropage\n \n elif byte1 == \"28\":\n return (\"\\tplp\\t\\t\\t\\t; 28 \") # none\n \n elif byte1 == \"29\":\n st.count += 1\n \n return (\"\\tand #$\" + byte2 + \"\\t\\t; 29 \" + byte2) # immediate\n \n elif byte1 == \"2a\":\n return (\"\\trol a\\t\\t\\t; 2a\") # A\n \n elif byte1 == \"2c\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 2c \"+byte2+\" \"+byte3)\n return (\"\\tbit $\" + byte3 + byte2 + \"\\t\\t; 2c \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"2d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 2d \"+byte2+\" \"+byte3)\n return (\"\\tand $\" + byte3 + byte2 + \"\\t\\t; 2d \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"2e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 2e \"+byte2+\" \"+byte3)\n return (\"\\trol $\" + byte3 + byte2 + \"\\t\\t; 2e \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"30\":\n y = int(byte2, 16)\n if y > 127:\n y -= 256\n kount2 = st.count + y + 2\n z = str(hex(kount2))\n z = z[2:] \n z = z.zfill(4)\n \n st.count += 1\n return (\"\\tbmi B\" +st.currentBank+\"_\"+ z + \" ; 30 \" + byte2) # Relative\n \n elif byte1 == \"31\":\n st.count += 1\n \n return (\"\\tand ($\" + byte2 + \"), y\\t; 31 \" + byte2) # (Indirect),Y\n \n elif byte1 == \"35\":\n st.count += 1\n \n return (\"\\tand $\" + byte2 + \", x\\t\\t; 35 \" + byte2) # Zeropage, x\n \n elif byte1 == \"36\":\n st.count += 1\n \n return (\"\\trol $\" + byte2 + \", x\\t\\t; 36 \" + byte2) # Zeropage, x\n \n elif byte1 == \"38\":\n return (\"\\tsec\\t\\t\\t\\t; 38 \") # none\n \n elif byte1 == \"39\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 39 \"+byte2+\" \"+byte3)\n return (\"\\tand $\" + byte3 + byte2 + \", y\\t; 39 \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"3d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 3d \"+byte2+\" \"+byte3)\n return (\"\\tand $\" + byte3 + byte2 + \", x\\t; 3d \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"3e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 3e \"+byte2+\" \"+byte3)\n return (\"\\trol $\" + byte3 + byte2 + \", x\\t; 3e \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"40\":\n return 
(\"\\trti\\t\\t\\t\\t; 40 \") # none\n \n elif byte1 == \"41\":\n st.count += 1\n \n return (\"\\teor ($\" + byte2 + \", x)\\t; 41 \" + byte2) # (Indirect,X)\n \n elif byte1 == \"45\":\n st.count += 1\n \n return (\"\\teor $\" + byte2 + \"\\t\\t\\t; 45 \" + byte2) # Zeropage\n \n elif byte1 == \"46\":\n st.count += 1\n \n return (\"\\tlsr $\" + byte2 + \"\\t\\t\\t; 46 \" + byte2) # Zeropage\n \n elif byte1 == \"48\":\n return (\"\\tpha\\t\\t\\t\\t; 48 \") # none\n \n elif byte1 == \"49\":\n st.count += 1\n \n return (\"\\teor #$\" + byte2 + \"\\t\\t; 49 \" + byte2) # immediate\n \n elif byte1 == \"4a\":\n return (\"\\tlsr a\\t\\t\\t; 4a\") # A\n \n elif byte1 == \"4c\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 4c \"+byte2+\" \"+byte3)\n return (\"\\tjmp $\" + byte3 + byte2 + \"\\t\\t; 4c \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"4d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 4d \"+byte2+\" \"+byte3)\n return (\"\\teor $\" + byte3 + byte2 + \"\\t\\t; 4d \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"4e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 4e \"+byte2+\" \"+byte3)\n return (\"\\tlsr $\" + byte3 + byte2 + \"\\t\\t; 4e \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"50\":\n y = int(byte2, 16)\n if y > 127:\n y -= 256\n kount2 = st.count + y + 2\n z = str(hex(kount2))\n z = z[2:] \n z = z.zfill(4)\n \n st.count += 1\n return (\"\\tbvc B\" +st.currentBank+\"_\"+ z + \" ; 50 \" + byte2) # Relative\n \n \n elif byte1 == \"51\":\n st.count += 1\n \n return (\"\\teor ($\" + byte2 + \"), y\\t; 51 \" + byte2) # (Indirect),Y\n \n elif byte1 == \"55\":\n st.count += 1\n \n return (\"\\teor $\" + byte2 + \", x\\t\\t; 55 \" + byte2) # Zeropage, x\n \n elif byte1 == \"56\":\n st.count += 1\n \n return (\"\\tlsr $\" + byte2 + \", x\\t\\t; 56 \" + byte2) # Zeropage, x\n \n elif byte1 == \"58\":\n return (\"\\tcli\\t\\t\\t\\t; 58 \") # none\n \n elif byte1 == \"59\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 59 \"+byte2+\" \"+byte3)\n return (\"\\teor $\" + byte3 + byte2 + \", y\\t; 59 \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"5d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 5d \"+byte2+\" \"+byte3)\n return (\"\\teor $\" + byte3 + byte2 + \", x\\t; 5d \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"5e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 5e \"+byte2+\" \"+byte3)\n return (\"\\tlsr $\" + byte3 + byte2 + \", x\\t; 5e \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"60\":\n return (\"\\trts\\t\\t\\t\\t; 60 \") # none\n \n elif byte1 == \"61\":\n st.count += 1\n \n return (\"\\tadc ($\" + byte2 + \", x)\\t; 61 \" + byte2) # (Indirect,X)\n \n elif byte1 == \"65\":\n st.count += 1\n \n return (\"\\tadc $\" + byte2 + \"\\t\\t\\t; 65 \" + byte2) # Zeropage\n \n elif byte1 == \"66\":\n st.count += 1\n \n return (\"\\tror $\" + byte2 + \"\\t\\t\\t; 66 \" + byte2) # Zeropage\n \n elif byte1 == \"68\":\n return (\"\\tpla\\t\\t\\t\\t; 68 \") # none\n\n elif byte1 == \"69\":\n st.count += 1\n \n return (\"\\tadc #$\" + byte2 + \"\\t\\t; 69 \" + byte2) # immediate\n \n elif byte1 == \"6a\":\n return (\"\\tror a\\t\\t\\t; 6a\") # A\n \n elif byte1 == \"6c\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 6c \"+byte2+\" \"+byte3)\n return (\"\\tjmp ($\" + byte3 + byte2 + \")\\t\\t; 6c \" + byte2 + \" \" + byte3) # absolute (indirect)\n \n elif byte1 == \"6d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 6d 
\"+byte2+\" \"+byte3)\n return (\"\\tadc $\" + byte3 + byte2 + \"\\t\\t; 6d \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"6e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 6e \"+byte2+\" \"+byte3)\n return (\"\\tror $\" + byte3 + byte2 + \"\\t\\t; 6e \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"70\":\n y = int(byte2, 16)\n if y > 127:\n y -= 256\n kount2 = st.count + y + 2\n z = str(hex(kount2))\n z = z[2:] \n z = z.zfill(4)\n \n st.count += 1\n return (\"\\tbvs B\" +st.currentBank+\"_\"+ z + \" ; 70 \" + byte2) # Relative\n \n elif byte1 == \"71\":\n st.count += 1\n \n return (\"\\tadc ($\" + byte2 + \"), y\\t; 71 \" + byte2) # (Indirect),Y\n \n elif byte1 == \"75\":\n st.count += 1\n \n return (\"\\tadc $\" + byte2 + \", x\\t\\t; 75 \" + byte2) # Zeropage, x\n \n elif byte1 == \"76\":\n st.count += 1\n \n return (\"\\tror $\" + byte2 + \", x\\t\\t; 76 \" + byte2) # Zeropage, x\n \n elif byte1 == \"78\":\n return (\"\\tsei\\t\\t\\t\\t; 78 \") # none\n \n elif byte1 == \"79\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 79 \"+byte2+\" \"+byte3)\n return (\"\\tadc $\" + byte3 + byte2 + \", y\\t; 79 \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"7d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 7d \"+byte2+\" \"+byte3)\n return (\"\\tadc $\" + byte3 + byte2 + \", x\\t; 7d \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"7e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 7e \"+byte2+\" \"+byte3)\n return (\"\\tror $\" + byte3 + byte2 + \", x\\t; 7e \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"81\":\n st.count += 1\n \n return (\"\\tsta ($\" + byte2 + \", x)\\t; 81 \" + byte2) # (Indirect,X)\n \n elif byte1 == \"84\":\n st.count += 1\n \n return (\"\\tsty $\" + byte2 + \"\\t\\t\\t; 84 \" + byte2) # Zeropage\n \n elif byte1 == \"85\":\n st.count += 1\n \n return (\"\\tsta $\" + byte2 + \"\\t\\t\\t; 85 \" + byte2) # Zeropage\n \n elif byte1 == \"86\":\n st.count += 1\n \n return (\"\\tstx $\" + byte2 + \"\\t\\t\\t; 86 \" + byte2) # Zeropage\n \n elif byte1 == \"88\":\n return (\"\\tdey\\t\\t\\t\\t; 88 \") # none\n \n elif byte1 == \"8a\":\n return (\"\\ttxa\\t\\t\\t\\t; 8a \") # none\n \n elif byte1 == \"8c\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 8c \"+byte2+\" \"+byte3)\n return (\"\\tsty $\" + byte3 + byte2 + \"\\t\\t; 8c \" + byte2 + \" \" + byte3) # absolute \n \n elif byte1 == \"8d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 8d \"+byte2+\" \"+byte3)\n return (\"\\tsta $\" + byte3 + byte2 + \"\\t\\t; 8d \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"8e\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 8e \"+byte2+\" \"+byte3)\n return (\"\\tstx $\" + byte3 + byte2 + \"\\t\\t; 8e \" + byte2 + \" \" + byte3) # absolute \n \n elif byte1 == \"90\":\n y = int(byte2, 16)\n if y > 127:\n y -= 256\n kount2 = st.count + y + 2\n z = str(hex(kount2))\n z = z[2:] \n z = z.zfill(4)\n \n st.count += 1\n return (\"\\tbcc B\" +st.currentBank+\"_\"+ z + \" ; 90 \" + byte2) # Relative\n \n elif byte1 == \"91\":\n st.count += 1\n \n return (\"\\tsta ($\" + byte2 + \"), y\\t; 91 \" + byte2) # (Indirect),Y\n \n elif byte1 == \"94\":\n st.count += 1\n \n return (\"\\tsty $\" + byte2 + \", x\\t\\t; 94 \" + byte2) # Zeropage, x\n \n elif byte1 == \"95\":\n st.count += 1\n \n return (\"\\tsta $\" + byte2 + \", x\\t\\t; 95 \" + byte2) # Zeropage, x\n \n elif byte1 == \"96\":\n st.count += 1\n \n return (\"\\tstx $\" + byte2 + \", 
y\\t\\t; 96 \" + byte2) # Zeropage, y\n \n elif byte1 == \"98\":\n return (\"\\ttya\\t\\t\\t\\t; 98 \") # none\n \n elif byte1 == \"99\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 99 \"+byte2+\" \"+byte3)\n return (\"\\tsta $\" + byte3 + byte2 + \", y\\t; 99 \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"9a\":\n return (\"\\ttxs\\t\\t\\t\\t; 9a \") # none\n \n elif byte1 == \"9d\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex 9d \"+byte2+\" \"+byte3)\n return (\"\\tsta $\" + byte3 + byte2 + \", x\\t; 9d \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"a0\":\n st.count += 1\n \n return (\"\\tldy #$\" + byte2 + \"\\t\\t; a0 \" + byte2) # immediate\n \n elif byte1 == \"a1\":\n st.count += 1\n \n return (\"\\tlda ($\" + byte2 + \", x)\\t; a1 \" + byte2) # (Indirect,X)\n \n elif byte1 == \"a2\":\n st.count += 1\n \n return (\"\\tldx #$\" + byte2 + \"\\t\\t; a2 \" + byte2) # immediate\n \n elif byte1 == \"a4\":\n st.count += 1\n \n return (\"\\tldy $\" + byte2 + \"\\t\\t\\t; a4 \" + byte2) # Zeropage\n \n elif byte1 == \"a5\":\n st.count += 1\n \n return (\"\\tlda $\" + byte2 + \"\\t\\t\\t; a5 \" + byte2) # Zeropage\n \n elif byte1 == \"a6\":\n st.count += 1\n \n return (\"\\tldx $\" + byte2 + \"\\t\\t\\t; a6 \" + byte2) # Zeropage\n \n elif byte1 == \"a8\":\n return (\"\\ttay\\t\\t\\t\\t; a8 \") # none \n \n elif byte1 == \"a9\":\n st.count += 1\n \n return (\"\\tlda #$\" + byte2 + \"\\t\\t; a9 \" + byte2) # immediate\n \n elif byte1 == \"aa\":\n return (\"\\ttax\\t\\t\\t\\t; aa \") # none\n \n elif byte1 == \"ac\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex ac \"+byte2+\" \"+byte3)\n return (\"\\tldy $\" + byte3 + byte2 + \"\\t\\t; ac \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"ad\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex ad \"+byte2+\" \"+byte3)\n return (\"\\tlda $\" + byte3 + byte2 + \"\\t\\t; ad \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"ae\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex ae \"+byte2+\" \"+byte3)\n return (\"\\tldx $\" + byte3 + byte2 + \"\\t\\t; ae \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"b0\":\n y = int(byte2, 16)\n if y > 127:\n y -= 256\n kount2 = st.count + y + 2\n z = str(hex(kount2))\n z = z[2:] \n z = z.zfill(4)\n \n st.count += 1\n return (\"\\tbcs B\" +st.currentBank+\"_\"+ z + \" ; b0 \" + byte2) # Relative\n \n elif byte1 == \"b1\":\n st.count += 1\n \n return (\"\\tlda ($\" + byte2 + \"), y\\t; b1 \" + byte2) # (Indirect),Y\n \n elif byte1 == \"b4\":\n st.count += 1\n \n return (\"\\tldy $\" + byte2 + \", x\\t\\t; b4 \" + byte2) # Zeropage, x\n \n elif byte1 == \"b5\":\n st.count += 1\n \n return (\"\\tlda $\" + byte2 + \", x\\t\\t; b5 \" + byte2) # Zeropage, x\n \n elif byte1 == \"b6\":\n st.count += 1\n \n return (\"\\tldx $\" + byte2 + \", y\\t\\t; b6 \" + byte2) # Zeropage, y\n \n elif byte1 == \"b8\":\n return (\"\\tclv\\t\\t\\t\\t; b8 \") # none\n \n elif byte1 == \"b9\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex b9 \"+byte2+\" \"+byte3)\n return (\"\\tlda $\" + byte3 + byte2 + \", y\\t; b9 \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"ba\":\n return (\"\\ttsx\\t\\t\\t\\t; ba \") # none\n \n elif byte1 == \"bc\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex bc \"+byte2+\" \"+byte3)\n return (\"\\tldy $\" + byte3 + byte2 + \", x\\t; bc \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"bd\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex bd 
\"+byte2+\" \"+byte3)\n return (\"\\tlda $\" + byte3 + byte2 + \", x\\t; bd \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"be\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex be \"+byte2+\" \"+byte3)\n return (\"\\tldx $\" + byte3 + byte2 + \", y\\t; be \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"c0\":\n st.count += 1\n \n return (\"\\tcpy #$\" + byte2 + \"\\t\\t; c0 \" + byte2) # immediate\n \n elif byte1 == \"c1\":\n st.count += 1\n \n return (\"\\tcmp ($\" + byte2 + \", x)\\t; c1 \" + byte2) # (Indirect,X)\n \n elif byte1 == \"c4\":\n st.count += 1\n \n return (\"\\tcpy $\" + byte2 + \"\\t\\t\\t; c4 \" + byte2) # Zeropage \n \n elif byte1 == \"c5\":\n st.count += 1\n \n return (\"\\tcmp $\" + byte2 + \"\\t\\t\\t; c5 \" + byte2) # Zeropage\n \n elif byte1 == \"c6\":\n st.count += 1\n \n return (\"\\tdec $\" + byte2 + \"\\t\\t\\t; c6 \" + byte2) # Zeropage\n \n elif byte1 == \"c8\":\n return (\"\\tiny\\t\\t\\t\\t; c8 \") # none\n \n elif byte1 == \"c9\":\n st.count += 1\n \n return (\"\\tcmp #$\" + byte2 + \"\\t\\t; c9 \" + byte2) # immediate\n \n elif byte1 == \"ca\":\n return (\"\\tdex\\t\\t\\t\\t; ca \") # none\n \n elif byte1 == \"cc\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex cc \"+byte2+\" \"+byte3)\n return (\"\\tcpy $\" + byte3 + byte2 + \"\\t\\t; cc \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"cd\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex cd \"+byte2+\" \"+byte3)\n return (\"\\tcmp $\" + byte3 + byte2 + \"\\t\\t; cd \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"ce\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex ce \"+byte2+\" \"+byte3)\n return (\"\\tdec $\" + byte3 + byte2 + \"\\t\\t; ce \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"d0\":\n y = int(byte2, 16)\n if y > 127:\n y -= 256\n kount2 = st.count + y + 2\n z = str(hex(kount2))\n z = z[2:] \n z = z.zfill(4)\n \n st.count += 1\n return (\"\\tbne B\" +st.currentBank+\"_\"+ z + \" ; d0 \" + byte2) # Relative \n \n elif byte1 == \"d1\":\n st.count += 1\n \n return (\"\\tcmp ($\" + byte2 + \"), y\\t; d1 \" + byte2) # (Indirect),Y\n \n elif byte1 == \"d5\":\n st.count += 1\n \n return (\"\\tcmp $\" + byte2 + \", x\\t\\t; d5 \" + byte2) # Zeropage, x\n \n elif byte1 == \"d6\":\n st.count += 1\n \n return (\"\\tdec $\" + byte2 + \", x\\t\\t; d6 \" + byte2) # Zeropage, x\n \n elif byte1 == \"d8\":\n return (\"\\tcld\\t\\t\\t\\t; b8 \") # none\n \n elif byte1 == \"d9\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex d9 \"+byte2+\" \"+byte3)\n return (\"\\tcmp $\" + byte3 + byte2 + \", y\\t; d9 \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"dd\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex dd \"+byte2+\" \"+byte3)\n return (\"\\tcmp $\" + byte3 + byte2 + \", x\\t; dd \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"de\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex de \"+byte2+\" \"+byte3)\n return (\"\\tdec $\" + byte3 + byte2 + \", x\\t; de \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"e0\":\n st.count += 1\n \n return (\"\\tcpx #$\" + byte2 + \"\\t\\t; e0 \" + byte2) # immediate\n \n elif byte1 == \"e1\":\n st.count += 1\n \n return (\"\\tsbc ($\" + byte2 + \", x)\\t; e1 \" + byte2) # (Indirect,X)\n \n elif byte1 == \"e4\":\n st.count += 1\n \n return (\"\\tcpx $\" + byte2 + \"\\t\\t\\t; e4 \" + byte2) # Zeropage\n \n elif byte1 == \"e5\":\n st.count += 1\n \n return (\"\\tsbc $\" + byte2 + \"\\t\\t\\t; e5 \" + byte2) 
# Zeropage\n \n elif byte1 == \"e6\":\n st.count += 1\n \n return (\"\\tinc $\" + byte2 + \"\\t\\t\\t; e6 \" + byte2) # Zeropage\n \n elif byte1 == \"e8\":\n return (\"\\tinx\\t\\t\\t\\t; e8 \") # none\n \n elif byte1 == \"e9\":\n st.count += 1\n \n return (\"\\tsbc #$\" + byte2 + \"\\t\\t; e9 \" + byte2) # immediate\n \n elif byte1 == \"ea\":\n return (\"\\tnop\\t\\t\\t\\t; ea \") # none\n \n elif byte1 == \"ec\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex ec \"+byte2+\" \"+byte3)\n return (\"\\tcpx $\" + byte3 + byte2 + \"\\t\\t; ec \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"ed\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex ed \"+byte2+\" \"+byte3)\n return (\"\\tsbc $\" + byte3 + byte2 + \"\\t\\t; ed\" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"ee\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex ee \"+byte2+\" \"+byte3)\n return (\"\\tinc $\" + byte3 + byte2 + \"\\t\\t; ee \" + byte2 + \" \" + byte3) # absolute\n \n elif byte1 == \"f0\":\n y = int(byte2, 16)\n if y > 127:\n y -= 256\n kount2 = st.count + y + 2\n z = str(hex(kount2))\n z = z[2:] \n z = z.zfill(4)\n \n st.count += 1\n return (\"\\tbeq B\" +st.currentBank+\"_\"+ z + \" ; f0 \" + byte2) # Relative \n \n elif byte1 == \"f1\":\n st.count += 1\n \n return (\"\\tsbc ($\" + byte2 + \"), y\\t; f1 \" + byte2) # (Indirect),Y\n \n elif byte1 == \"f5\":\n st.count += 1\n \n return (\"\\tsbc $\" + byte2 + \", x\\t\\t; f5 \" + byte2) # Zeropage, x \n \n elif byte1 == \"f6\":\n st.count += 1\n \n return (\"\\tinc $\" + byte2 + \", x\\t\\t; f6 \" + byte2) # Zeropage, x\n \n elif byte1 == \"f8\":\n return (\"\\tsed\\t\\t\\t\\t; f8 \") # none\n \n elif byte1 == \"f9\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex f9 \"+byte2+\" \"+byte3)\n return (\"\\tsbc $\" + byte3 + byte2 + \", y\\t; f9 \" + byte2 + \" \" + byte3) # absolute, y\n \n elif byte1 == \"fd\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex fd \"+byte2+\" \"+byte3)\n return (\"\\tsbc $\" + byte3 + byte2 + \", x\\t; fd \" + byte2 + \" \" + byte3) # absolute, x\n \n elif byte1 == \"fe\":\n st.count += 2\n if (byte3 == \"00\"):\n return (\".hex fe \"+byte2+\" \"+byte3)\n return (\"\\tinc $\" + byte3 + byte2 + \", x\\t; fe \" + byte2 + \" \" + byte3) # absolute, x \n \n else:\n return (\".db $\" + byte1) # unknown opcode\n\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# mainFunction\ndef dasm6(path, outName=None):\n '''\n path: Archivo de origen\n outName: nombre del archivo de salida\n '''\n # initialize some variables\n st = States()\n # START OF PROGRAM\n\n filename = os.path.basename(path)\n try:\n fileIn = open(path, \"rb\") #read bytes\n except:\n print(\"\\nERROR: couldn't find file\\n\")\n raise\n \n print (filename)\n filesize = os.path.getsize(path)\n print(\"filesize = \", filesize)\n folder = os.path.dirname(path)\n \n st.workArray = fileIn.read() #make a big int array \n\n testarray = b'NES\\x1a' # NES 1a\n\n # validate header \n if testarray != st.workArray[0:4]:\n print(\"\\nERROR: couldn't find iNES header\\n\")\n exit()\n\n # get ROM sizes\n \n prgROM = st.workArray[4]\n prgROMtotal = prgROM * 0x4000\n print (\"PRGROM size = \", prgROM, \" = \", prgROMtotal)\n\n chrROM = st.workArray[5]\n chrROMtotal = chrROM * 0x2000\n print (\"CHRROM size = \", chrROM, \" = \", chrROMtotal) \n \n a = 16 + prgROMtotal + chrROMtotal\n print (\"Header + PRGROM + CHRROM = \", a)\n if (filesize != a): \n print (\"\\nERROR: filesize does not match the header\")\n if 
(filesize < a):\n exit()\n else:\n print (\"Will try to disassemble anyway.\\n\") \n else:\n print (\"filesize matches header, ok\")\n \n \n # get mapper \n byte6 = st.workArray[6]\n a = byte6 >> 4\n byte7 = st.workArray[7]\n b = byte7 & 0xf0\n c = a + b\n\n if (prgROM == 2):\n st.bankSize = 32768\n #1/2 = 8192\n Map = MAPPER.get(c,\"Other / Too Lazy to type them all in.\")\n if isinstance(Map, tuple):\n # If the mapper is unusual, the bank size\n # is specified along with it\n Map , st.bankSize = Map\n print (\"Mapper number = \", c, \" = \", Map) \n\n\n # mirroring = low bit of byte6\n a = byte6 & 0x08 # 4 screen\n if (a == 0):\n a = byte6 & 0x01 # 2 screen\n \n if (a == 0):\n print (\"horizontal mirroring\")\n elif (a == 1):\n print (\"vertical mirroring\")\n else:\n print (\"4 screen mode\")\n \n \n # extra RAM at 6000 = byte6 ? bit 2\n\n a = byte6 & 0x02\n if (a != 0):\n print (\"extra RAM at $6000, yes\")\n \n # sanity check \n\n if prgROM == 0 or filesize < 16400:\n print (\"file too small, not valid\")\n exit()\n \n # split ROM into 2 binary files, PRG minus the header (called .bin), and CHR\n\n newName = os.path.splitext(filename)[0] # strip the extension\n #%%%%%%%%%%%%%%%%%% Adjusted\n if outName !=None:\n newName = outName.split('.')[0]\n newPath = os.path.join(folder, newName + \".bin\") \n \n with open(newPath,'wb') as fileOut:\n fileOut.write(st.workArray[16:prgROMtotal+16])\n \n print (newName+ \".bin created\")\n\n #chrROMtotal\n if (chrROM != 0):\n newPath = os.path.join(folder, newName + \".chr\") \n \n with open(newPath,'wb') as fileOut:\n fileOut.write(st.workArray[prgROMtotal+16: chrROMtotal+prgROMtotal+16])\n print (newName+ \".chr created\")\n \n else:\n print (\"No CHR\")\n\n \n # get bank size, from user\n Valid = 0\n b = 0\n print(\"Recommended bank size = \", st.bankSize)\n while (Valid == 0):\n a = input(\"OK?
Y/N:\")\n if a == \"Y\" or a == \"y\":\n Valid = 1\n else:\n bankDic ={\"1\":8192,\"2\":16384, '4':32768}\n while (b == 0):\n b = input(\"1 = 8192, 2 = 16384, 4 = 32768:\")\n if b in bankDic:\n st.bankSize = bankDic[b]\n Valid = 1\n break\n else:\n b = 0\n\n\n if (st.bankSize > prgROMtotal):\n print(\"exceeds total PRG ROM size...\")\n st.bankSize = prgROMtotal\n \n print(\"st.bankSize = \", st.bankSize)\n\n # start writing the MAIN ASM file\n\n newPath = os.path.join(folder, newName + \".asm\") \n \n fileOutMain = open(newPath,\"w\") # write text\n print (newName+ \".asm created\")\n\n \n fileOutMain.write (\"; \" + filename + \" disassembly\\n\")\n fileOutMain.write (\"; for asm6\\n\\n\")\n\n fileOutMain.write (\"; *** HEADER ***\\n\\n\")\n fileOutMain.write (\".db \\\"NES\\\", $1a\\n\")\n\n\n a = st.workArray[4] # byte 4\n c = str(a)\n fileOutMain.write (\".db \" + c + \" ; = number of PRG banks * $4000\\n\")\n\n a = st.workArray[5] # byte 5\n c = str(a)\n fileOutMain.write (\".db \" + c + \" ; = number of CHR banks * $2000\\n\")\n\n a = st.workArray[6] # byte 6\n c = str(a)\n fileOutMain.write (\".db \" + c + \"\\t; \" + Map + \"\\n\")\n\n a = st.workArray[7] # byte 7\n c = str(a)\n fileOutMain.write (\".db \" + c + \"\\n\")\n\n a = st.workArray[8] # byte 8\n c = str(a)\n fileOutMain.write (\".db \" + c + \"\\n\")\n\n a = st.workArray[9] # byte 9\n c = str(a)\n fileOutMain.write (\".db \" + c + \"\\n\")\n\n a = st.workArray[10] # byte 10\n c = str(a)\n fileOutMain.write (\".db \" + c + \"\\n\")\n fileOutMain.write (\".db 0,0,0,0,0\\n\\n\") # bytes 11-15\n\n\n fileOutMain.write (\"; *** PRG ROM ***\\n\\n\")\n\n if prgROM > 1:\n fileOutMain.write (\".base $8000\\n\\n\") # default starting address\n else:\n fileOutMain.write (\".base $c000\\n\\n\") # default starting address\n\n st.workArraySmall = [0] * st.bankSize\n\n\n\n # start writing the other ASM files, bank by bank\n\n bankNumberTotal = int (prgROMtotal / st.bankSize)\n\n for bankNumber in range (0,bankNumberTotal):\n st.currentBank = str(bankNumber)\n fileOutMain.write(\".include \"+newName + st.currentBank + \".asm\\n\\n\")\n newPath = os.path.join(folder, newName + st.currentBank + \".asm\") \n \n fileOutSmall = open(newPath,\"w+\") # write text, and read it\n print (newName+st.currentBank+ \".asm created\")\n\n fileOutSmall.write (\";\"+newName+st.currentBank+\"\\n\\n\\n\\n\")\n \n #create a smaller array\n for i in range (0,st.bankSize):\n j = i + 16 + (bankNumber*st.bankSize)\n st.workArraySmall[i] = st.workArray[j] # note both int arrays\n \n # decode the array\n st.count = 0\n while (st.count < st.bankSize-2): # change later ?\n a = st.workArraySmall[st.count] # get 3 bytes, just in case\n first = str (hex (a)) #convert int to hex string\n first = first[2:] # strip the 0x off\n first = first.zfill(2) # at least 2 wide, fill zero\n a = st.workArraySmall[st.count+1]\n second = str (hex (a))\n second = second[2:]\n second = second.zfill(2)\n a = st.workArraySmall[st.count+2]\n third = str (hex (a))\n third = third[2:]\n third = third.zfill(2)\n \n z = str(hex(st.count))\n z = z[2:] \n z = z.zfill(4)\n \n fileOutSmall.write(\"B\"+st.currentBank+\"_\"+z+\":\\t\")\n \n outString = ToASM(st, first,second,third)\n fileOutSmall.write(outString+\"\\n\")\n \n st.count += 1\n \n # print the final bytes...
if needed\n if (st.count < st.bankSize):\n a = st.workArraySmall[st.count] # get 3 bytes, just in case\n first = str (hex (a)) #convert int to hex string\n first = first[2:] # strip the 0x off\n first = first.zfill(2) # at least 2 wide, fill zero\n fileOutSmall.write(\"\\t\\t.db $\" + first+\"\\n\")\n st.count += 1\n if (st.count < st.bankSize):\n a = st.workArraySmall[st.count] # get 3 bytes, just in case\n first = str (hex (a)) #convert int to hex string\n first = first[2:] # strip the 0x off\n first = first.zfill(2) # at least 2 wide, fill zero\n fileOutSmall.write(\"\\t\\t.db $\" + first+\"\\n\")\n st.count += 1 \n \n \n \n # remove broken labels \n \n fileOutSmall.seek(0) # needed ?\n contents = fileOutSmall.readlines()\n listAll = []\n for i in range(len(contents)):\n listAll.extend(contents[i].split())\n \n listLabels = [] # make a list of all labels in sub-file\n \n word = \"\"\n last = \"\"\n position = 0\n \n loop = len(listAll)\n for i in range (0,loop):\n word = str(listAll[i])\n last = word[-1:]\n if last == \":\":\n word = word [:-1]\n listLabels.append(word)\n \n # see if reference to label, if not, remove it.\n fileOutSmall.seek(0)\n filedata = fileOutSmall.read()\n \n for i in range (0,loop):\n \n word = str(listAll[i])\n if word == \"bcc\" or word == \"bcs\" or word == \"bvc\" or word == \"bvs\" \\\n or word == \"beq\" or word == \"bne\" or word == \"bmi\" or word == \"bpl\":\n word2 = str(listAll[i+1])\n if word2 not in listLabels:\n\n # kill the word in the original text file now\n fullword = word+\" \"+word2+\" ;\"\n filedata = filedata.replace(fullword, \";removed\\n\\t.hex \") # replace it with this\n \n\n \n fileOutSmall.seek(0)\n fileOutSmall.write(filedata)\n fileOutSmall.close()\n \n if bankNumberTotal > bankNumber+1:\n fileOutMain.write (\".base $8000\\n\\n\") # default starting address, maybe fix this later\n\n # end of sub bank asm decode loop \n \n fileOutMain.write (\"\\n\\n; *** CHR ROM ***\\n\\n\") \n if (chrROM != 0):\n fileOutMain.write (\".incbin \"+newName+\".chr\\n\\n\")\n else:\n fileOutMain.write (\";No CHR ROM\\n\\n\")\n fileOutMain.close()\n fileIn.close()\n\n print (\"done!\")\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"usage: \" + sys.argv[0] + \" file.nes\")\n exit()\n path = sys.argv[1]\n dasm6(path)\n","sub_path":"dasm6/dasm6.py","file_name":"dasm6.py","file_ext":"py","file_size_in_byte":36425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"317415928","text":"import threading\nfrom msg import *\n\n\ndef ProcessMessages():\n\twhile True:\n\t\tif Message.ClientID == 0 :\n\t\t\tbreak\n\t\tm = Message.SendMessage(M_BROKER, M_GETDATA)\n\t\tif m.Header.Type == M_DATA:\n\t\t\tprint(\"\\nMessage from User \" + str(m.Header.From))\n\t\t\tprint(m.Data)\n\t\telse:\n\t\t\ttime.sleep(1)\n\n\ndef Client():\n\tMessage.SendMessage(M_BROKER, M_INIT)\n\tt = threading.Thread(target=ProcessMessages)\n\tt.start()\n\nwhile True:\n\tprint(\"\\n1. Connect\\n2. Write private message\\n3. Write global message\\n0. Exit\\n\")\n\tprint(\"\\nChoose your Fighter:\")\n\tactionId = int(input())\n\tif (actionId == 1):\n\t\tClient()\n\t\tprint(\"Welcome to the club, User \" + str(Message.ClientID))\n\telif (actionId == 2):\n\t\tif (Message.ClientID == 0):\n\t\t\tprint(\"Please, connect to server\")\n\t\t\tcontinue\n\t\tprint(\"Write Buddie`s ID:\")\n\t\trecieverId = int(input())\n\t\tprint(\"\\nWrite message to Buddie:\\n\")\n\t\tMessage.SendMessage(recieverId, M_DATA, input())\n\telif (actionId == 3):\n\t\tif(Message.ClientID == 0):\n\t\t\tprint(\"Please, connect to server\")\n\t\t\tcontinue\n\t\tprint(\"Write message to Buddies:\")\n\t\tMessage.SendMessage(M_ALL, M_DATA, input())\n\telif (actionId == 0):\n\t\tprint(\"Goodbye, Buddie\\n\")\n\t\tif (Message.ClientID != 0):\n\t\t\tMessage.SendMessage(M_BROKER, M_EXIT)\n\telse:\n\t\tprint(\"Action Unknown\")\n","sub_path":"MsgServer/PythonClient/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"571591331","text":"import logging\n\nfrom curation_projects import raamaayana\nfrom doc_curation.md_helper import MdFile\n\n# Remove all handlers associated with the root logger object.\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s\")\n\n\nmd_file_path = \"/home/vvasuki/vvasuki-git/kAvya/content/TIkA/padyam/purANam/rAmAyaNam/AndhrapAThaH\"\n# MdFile.fix_index_files(dir_path=md_file_path, dry_run=False)\n# MdFile.fix_titles(\n# md_files=raamaayana.get_adhyaaya_md_files(md_file_path),\n# spreadhsheet_id=\"1xqVBhDwRzcEL7HlCJhxmnG1aOFFk6B8gGZ4GuBZynf8\",\n# worksheet_name=\"शीर्षिकाः\", id_column=\"id\", title_column=\"अन्तिमशीर्षिका\", md_file_to_id=raamaayana.get_adhyaaya_id, dry_run=False\n# )\nMdFile.devanaagarify_titles(md_files=raamaayana.get_adhyaaya_md_files(md_file_path), dry_run=False)","sub_path":"curation_projects/raamaayana/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"578296135","text":"import concurrent.futures\nimport json\nimport hashlib\nimport mimetypes\nimport re\nimport os\nimport stat\nimport logging\nimport pkg_resources\nimport shutil\nimport xml.etree.ElementTree as ET\n\nfrom django.conf import settings\nfrom django.core.files.storage import default_storage\nfrom django.template import Context, Template\nfrom django.utils import timezone\nfrom webob import Response\nfrom openedx.core.djangoapps.site_configuration import helpers as configuration_helpers\n\nfrom xblock.completable import CompletableXBlockMixin\nfrom xblock.core import XBlock\nfrom xblock.fields import Scope, String, Float, Boolean, Dict, DateTime, Integer\nfrom xblock.fragment import Fragment\n\n\n# Make '_' a no-op so we can scrape strings\n_ = lambda text: text\n\nlog = logging.getLogger(__name__)\n\nSCORM_ROOT = os.path.join(settings.MEDIA_ROOT, \"scormxblockmedia\")\nSCORM_URL = os.path.join(settings.MEDIA_URL, \"scormxblockmedia\")\nMAX_WORKERS = getattr(settings, \"THREADPOOLEXECUTOR_MAX_WORKERS\", 10)\nENABLE_PUBLISH_FAILED_SCORM_SCORE = settings.FEATURES.get('ENABLE_PUBLISH_FAILED_SCORM_SCORE', False)\n\n\nclass ScormXBlock(XBlock, CompletableXBlockMixin):\n display_name = String(\n display_name=_(\"Display Name\"),\n help=_(\"Display name for this module\"),\n default=\"Scorm\",\n scope=Scope.settings,\n )\n scorm_file = String(\n display_name=_(\"Upload scorm file\"),\n scope=Scope.settings,\n )\n path_index_page = String(\n display_name=_(\"Path to the index page in scorm file\"),\n scope=Scope.settings,\n )\n scorm_file_meta = Dict(scope=Scope.content)\n version_scorm = String(\n default=\"SCORM_12\",\n scope=Scope.settings,\n )\n # save completion_status for SCORM_2004\n lesson_status = String(scope=Scope.user_state, default=\"not attempted\")\n success_status = String(scope=Scope.user_state, default=\"unknown\")\n data_scorm = Dict(scope=Scope.user_state, default={})\n lesson_score = Float(scope=Scope.user_state, default=0)\n weight = Float(default=1, scope=Scope.settings)\n has_score = Boolean(\n display_name=_(\"Scored\"),\n help=_(\n \"Select False if this component will not receive a numerical score from the Scorm\"\n ),\n default=True,\n scope=Scope.settings,\n )\n icon_class = String(\n default=\"video\",\n scope=Scope.settings,\n )\n width = Integer(\n display_name=_(\"Display Width (px)\"),\n help=_(\"Width of iframe, if empty, the default 100%\"),\n scope=Scope.settings,\n )\n height = Integer(\n display_name=_(\"Display Height (px)\"),\n help=_(\"Height of iframe\"),\n default=450,\n scope=Scope.settings,\n )\n open_in_pop_up = Boolean(\n display_name=_(\"Open in Pop-up\"),\n help=_(\n \"Select True if you want learners to click on 'view course' button and then open scorm content in a pop-up window.\"\n \"Select False if you want the scorm content to open in an IFrame in the current page. 
\"\n ),\n default=False,\n scope=Scope.settings\n )\n\n has_author_view = True\n\n def resource_string(self, path):\n \"\"\"Handy helper for getting resources from our kit.\"\"\"\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")\n\n def student_view(self, context=None):\n context_html = self.get_context_student()\n template = self.render_template(\"static/html/scormxblock.html\", context_html)\n frag = Fragment(template)\n frag.add_css(self.resource_string(\"static/css/scormxblock.css\"))\n frag.add_javascript(self.resource_string(\"static/js/src/scormxblock.js\"))\n settings = {\"version_scorm\": self.version_scorm}\n frag.initialize_js(\"ScormXBlock\", json_args=settings)\n return frag\n\n def studio_view(self, context=None):\n context_html = self.get_context_studio()\n template = self.render_template(\"static/html/studio.html\", context_html)\n frag = Fragment(template)\n frag.add_css(self.resource_string(\"static/css/scormxblock.css\"))\n frag.add_javascript(self.resource_string(\"static/js/src/studio.js\"))\n frag.initialize_js(\"ScormStudioXBlock\")\n return frag\n\n def author_view(self, context=None):\n html = self.render_template(\"static/html/author_view.html\", context)\n frag = Fragment(html)\n return frag\n\n def _delete_local_storage(self):\n path = self.local_storage_path\n if os.path.exists(path):\n shutil.rmtree(path)\n\n @property\n def local_storage_path(self):\n return os.path.join(\n SCORM_ROOT, self.location.org, self.location.course, self.location.block_id\n )\n\n @property\n def s3_storage(self):\n return \"S3\" in default_storage.__class__.__name__\n\n def get_remote_path(self, local_path):\n return \"\".join(\n [self._file_storage_path(), local_path.replace(self.local_storage_path, \"\")]\n )\n\n @XBlock.handler\n def studio_submit(self, request, suffix=\"\"):\n self.display_name = request.params[\"display_name\"]\n self.width = request.params[\"width\"]\n self.height = request.params[\"height\"]\n self.open_in_pop_up = request.params[\"open_in_pop_up\"]\n self.has_score = request.params[\"has_score\"]\n self.icon_class = \"problem\" if self.has_score else \"video\"\n\n if hasattr(request.params[\"file\"], \"file\"):\n scorm_file = request.params[\"file\"].file\n\n # First, save scorm file in the storage for mobile clients\n self.scorm_file_meta[\"sha1\"] = self.get_sha1(scorm_file)\n self.scorm_file_meta[\"name\"] = scorm_file.name\n self.scorm_file_meta[\"path\"] = self._file_storage_path()\n self.scorm_file_meta[\"last_updated\"] = timezone.now().strftime(\n DateTime.DATETIME_FORMAT\n )\n self.scorm_file_meta[\"size\"] = scorm_file.size\n\n self._unpack_files(scorm_file)\n self.update_subdir_permissions()\n self.set_fields_xblock()\n if self.s3_storage:\n self._store_unziped_files_to_s3()\n # Removed locally unzipped files once we have store them on S3\n self._delete_local_storage()\n\n # changes made for juniper (python 3.5)\n return Response(\n json.dumps({\"result\": \"success\"}),\n content_type=\"application/json\",\n charset=\"utf8\",\n )\n\n def update_subdir_permissions(self):\n \"\"\"\n Extends existing permissions of all the the sub-directories with the Owner Execute permission (S_IXUSR).\n\n All sub-directories of the scorm-package must have executable permissions for the Directory Owner otherwise\n Studio will raise Permission Denied error on scorm package upload.\n \"\"\"\n for path, subdirs, files in os.walk(self.local_storage_path):\n for name in subdirs:\n dir_path = os.path.join(path, name)\n st = 
os.stat(dir_path)\n os.chmod(dir_path, st.st_mode | stat.S_IXUSR)\n\n def _unpack_files(self, scorm_file):\n \"\"\"\n Unpacks zip file using unzip system utility\n \"\"\"\n # Now unpack it into SCORM_ROOT to serve to students later\n self._delete_local_storage()\n local_path = self.local_storage_path\n os.makedirs(local_path)\n\n if hasattr(scorm_file, \"temporary_file_path\"):\n os.system(\n \"unzip {} -d {}\".format(scorm_file.temporary_file_path(), local_path)\n )\n else:\n temporary_path = os.path.join(SCORM_ROOT, scorm_file.name)\n temporary_zip = open(temporary_path, \"wb\")\n scorm_file.open()\n temporary_zip.write(scorm_file.read())\n temporary_zip.close()\n os.system(\"unzip {} -d {}\".format(temporary_path, local_path))\n os.remove(temporary_path)\n\n def _fix_content_type(self, file_path):\n \"\"\"\n Sometimes content type of file returned by mimetypes module is bytes object instead of string\n which fails content type validation of boto3 and boto3 would not upload file instead throws\n `botocore.exceptions.ParamValidationError: Parameter validation failed:`\n This method fixes such content types by changing their type from bytes to string\n \"\"\"\n _content_type, __ = mimetypes.guess_type(file_path)\n try:\n str_type = _content_type.decode(\"utf-8\")\n ext = file_path.split(\".\")[-1]\n mimetypes.add_type(str_type, \".\" + ext)\n except (UnicodeDecodeError, AttributeError):\n pass\n\n def _upload_file(self, file_path):\n self._fix_content_type(file_path)\n path = self.get_remote_path(file_path)\n with open(file_path, \"rb\") as content_file:\n default_storage.save(path, content_file)\n log.info('S3: \"{}\" file stored at \"{}\"'.format(file_path, path))\n\n def _delete_existing_files(self, path):\n \"\"\"\n Recusively delete all files under given path\n \"\"\"\n dir_names, file_names = default_storage.listdir(path)\n with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:\n tracker_futures = []\n for file_name in file_names:\n file_path = \"/\".join([path, file_name])\n tracker_futures.append(\n executor.submit(default_storage.delete, file_path)\n )\n log.info('S3: \"{}\" file deleted'.format(file_path))\n\n for dir_name in dir_names:\n dir_path = \"/\".join([path, dir_name])\n self._delete_existing_files(dir_path)\n\n def _store_unziped_files_to_s3(self):\n \"\"\"\"\"\"\n self._delete_existing_files(self._file_storage_path())\n local_path = self.local_storage_path\n file_paths = []\n for path, subdirs, files in os.walk(local_path):\n for name in files:\n file_paths.append(os.path.join(path, name))\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:\n tracker_futures = {\n executor.submit(self._upload_file, file_path): file_path\n for file_path in file_paths\n }\n for future in concurrent.futures.as_completed(tracker_futures):\n file_path = tracker_futures[future]\n try:\n future.result()\n except Exception as exc:\n log.info(\n \"S3: upload of %r generated an exception: %s\" % (file_path, exc)\n )\n\n @XBlock.json_handler\n def scorm_get_value(self, data, suffix=\"\"):\n name = data.get(\"name\")\n if name in [\"cmi.core.lesson_status\", \"cmi.completion_status\"]:\n return {\"value\": self.lesson_status}\n elif name == \"cmi.success_status\":\n return {\"value\": self.success_status}\n elif name in [\"cmi.core.score.raw\", \"cmi.score.raw\"]:\n return {\"value\": self.lesson_score * 100}\n else:\n return {\"value\": self.data_scorm.get(name, \"\")}\n\n @XBlock.json_handler\n def scorm_set_value(self, data, 
suffix=\"\"):\n        context = {\"result\": \"success\"}\n        name = data.get(\"name\")\n\n        if name in [\"cmi.core.lesson_status\", \"cmi.completion_status\"]:\n            self.lesson_status = data.get(\"value\")\n            if self.has_score and data.get(\"value\") in [\n                \"completed\",\n                \"failed\",\n                \"passed\",\n            ]:\n                self.publish_grade()\n                context.update({\"lesson_score\": self.lesson_score})\n\n        elif name == \"cmi.success_status\":\n            self.success_status = data.get(\"value\")\n            if self.has_score:\n                if self.success_status == \"unknown\":\n                    self.lesson_score = 0\n                self.publish_grade()\n                context.update({\"lesson_score\": self.lesson_score})\n        elif name in [\"cmi.core.score.raw\", \"cmi.score.raw\"] and self.has_score:\n            self.lesson_score = round(float(data.get(\"value\", 0)) / 100.0, 2)\n            self.publish_grade()\n            context.update({\"lesson_score\": self.lesson_score})\n        else:\n            self.data_scorm[name] = data.get(\"value\", \"\")\n\n        completion_status = self.get_completion_status()\n        context.update({\"completion_status\": completion_status})\n\n        # publish completion\n        if completion_status in [\"passed\", \"failed\", \"completed\"]:\n            self.emit_completion(1.0)\n\n        return context\n\n    def publish_grade(self):\n        if not ENABLE_PUBLISH_FAILED_SCORM_SCORE and (\n            self.lesson_status == \"failed\" or (\n                self.version_scorm == \"SCORM_2004\" and self.success_status in [\"failed\", \"unknown\"]\n            )\n        ):\n            self.runtime.publish(\n                self,\n                \"grade\",\n                {\n                    \"value\": 0,\n                    \"max_value\": self.weight,\n                },\n            )\n        else:\n            self.runtime.publish(\n                self,\n                \"grade\",\n                {\n                    \"value\": self.lesson_score,\n                    \"max_value\": self.weight,\n                },\n            )\n\n    def max_score(self):\n        \"\"\"\n        Return the maximum score possible.\n        \"\"\"\n        return self.weight if self.has_score else None\n\n    def get_context_studio(self):\n        return {\n            \"field_display_name\": self.fields[\"display_name\"],\n            \"field_scorm_file\": self.fields[\"scorm_file\"],\n            \"field_has_score\": self.fields[\"has_score\"],\n            \"field_width\": self.fields[\"width\"],\n            \"field_height\": self.fields[\"height\"],\n            \"field_open_in_pop_up\": self.fields[\"open_in_pop_up\"],\n            \"scorm_xblock\": self,\n        }\n\n    def get_context_student(self):\n        scorm_file_path = \"\"\n        if self.scorm_file:\n            scorm_file_path = \"{}{}\".format(\n                configuration_helpers.get_value(\"LMS_ROOT_URL\", settings.LMS_ROOT_URL),\n                self.scorm_file,\n            )\n\n        return {\n            \"scorm_file_path\": scorm_file_path,\n            \"completion_status\": self.get_completion_status(),\n            \"scorm_xblock\": self,\n        }\n\n    def render_template(self, template_path, context):\n        template_str = self.resource_string(template_path)\n        template = Template(template_str)\n        return template.render(Context(context))\n\n    def set_fields_xblock(self):\n\n        self.path_index_page = \"index.html\"\n        try:\n            tree = ET.parse(\"{}/imsmanifest.xml\".format(self.local_storage_path))\n        except IOError:\n            pass\n        else:\n            namespace = \"\"\n            for node in [\n                node\n                for _, node in ET.iterparse(\n                    \"{}/imsmanifest.xml\".format(self.local_storage_path),\n                    events=[\"start-ns\"],\n                )\n            ]:\n                if node[0] == \"\":\n                    namespace = node[1]\n                    break\n            root = tree.getroot()\n\n            if namespace:\n                resource = root.find(\n                    \"{{{0}}}resources/{{{0}}}resource\".format(namespace)\n                )\n                schemaversion = root.find(\n                    \"{{{0}}}metadata/{{{0}}}schemaversion\".format(namespace)\n                )\n            else:\n                resource = root.find(\"resources/resource\")\n                schemaversion = root.find(\"metadata/schemaversion\")\n\n            if resource is not None:\n                self.path_index_page = resource.get(\"href\")\n            if (schemaversion is not None) and (\n                re.match(r\"^1\\.2$\", 
schemaversion.text) is None\n            ):\n                self.version_scorm = \"SCORM_2004\"\n            else:\n                self.version_scorm = \"SCORM_12\"\n\n        self.scorm_file = os.path.join(\n            SCORM_URL,\n            \"{}/{}/{}/{}\".format(\n                self.location.org,\n                self.location.course,\n                self.location.block_id,\n                self.path_index_page,\n            ),\n        )\n\n    def get_completion_status(self):\n        completion_status = self.lesson_status\n        if self.version_scorm == \"SCORM_2004\" and self.success_status != \"unknown\":\n            completion_status = self.success_status\n        return completion_status\n\n    def _file_storage_path(self):\n        \"\"\"\n        Get the file path in storage.\n        \"\"\"\n        path = \"scormxblockmedia/{loc.org}/{loc.course}/{loc.block_id}\".format(\n            loc=self.location,\n        )\n        return path\n\n    def get_sha1(self, file_descriptor):\n        \"\"\"\n        Get file hex digest (fingerprint).\n        \"\"\"\n        block_size = 8 * 1024\n        sha1 = hashlib.sha1()\n        # changes made for juniper (python 3.5)\n        while True:\n            block = file_descriptor.read(block_size)\n            if not block:\n                break\n            sha1.update(block)\n        file_descriptor.seek(0)\n        return sha1.hexdigest()\n\n    def student_view_data(self):\n        \"\"\"\n        Inform REST API clients about the original file location and its \"freshness\".\n        Make sure to include `student_view_data=scormxblock` in the URL params of the request.\n        \"\"\"\n        if self.scorm_file and self.scorm_file_meta:\n            return {\n                \"last_modified\": self.scorm_file_meta.get(\"last_updated\", \"\"),\n                \"scorm_data\": default_storage.url(self._file_storage_path()),\n                \"size\": self.scorm_file_meta.get(\"size\", 0),\n                \"index_page\": self.path_index_page,\n            }\n        return {}\n\n    @staticmethod\n    def workbench_scenarios():\n        \"\"\"A canned scenario for display in the workbench.\"\"\"\n        return [\n            (\n                \"ScormXBlock\",\n                \"\"\"\n                \n                \n                \"\"\",\n            ),\n        ]\n","sub_path":"scormxblock/scormxblock.py","file_name":"scormxblock.py","file_ext":"py","file_size_in_byte":18160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"422604288","text":"class Animal:\n    def speak(self, sound):\n        # store the value under a name that does not shadow the speak() method\n        self.sound = sound\n        print(self.sound)\nclass Dog(Animal):\n    def bark(self, sound):\n        # likewise, do not rebind self.bark (the method) to a string\n        self.bark_sound = sound\n        print(self.bark_sound)\nobjAnimal=Animal()\nobjDog=Dog()\nobjDog.speak(\"animal speaking\")\nobjDog.bark(\"dog barking\")\n\n","sub_path":"day6practice/inheritance1.py","file_name":"inheritance1.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"172146604","text":"import sys \n# Import pyplot\nfrom matplotlib import pyplot as plt\n\nnome = sys.argv[1] \narq = open(nome, \"r\")\n\n# read the instance name\ninstancia=arq.readline()\n# read the best known solution\nmelhor=arq.readline()\n\nit = []\nvalor = []\notimo=[]\n# reading the iteration data\nfor linha in arq:\n    valores = linha.split()\n    it.append(int(valores[0]))\n    valor.append(float(valores[1]))\n    otimo.append(float(melhor))\n    print('iteracao ', valores[0], 'valor ', valores[1] )\n\n\narq.close()\n\n# x-axis, y-axis\nplt.plot(it, valor,label='Busca Local')\nplt.plot(it, otimo,label='Melhor Valor')\nplt.xlabel('Iteração')\nplt.ylabel('Valor')\nplt.title(instancia)\nplt.legend()\nplt.grid()\n#plt.xticks(range(min(it), max(it)+1))\n\n#plt.autoscale(enable=True,axis='x',tight=True)\n#plt.gca().set_aspect('equal', adjustable='box')\n#plt.draw()\nplt.show()","sub_path":"grafico.py","file_name":"grafico.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"11355129","text":"import json\nimport os\nimport re\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterable, Sized, Tuple\n\n\ndef _get_root() -> Tuple[bool, Path]:\n env_root = os.environ.get('ISHUROOT')\n if env_root:\n env_path = Path(env_root).expanduser().resolve()\n if env_path.exists():\n return True, (env_path / '.ishu')\n return False, (Path().resolve() / '.ishu')\n\n\nROOT_OVERRIDE, ROOT = _get_root()\n\n# Don't call this 'tags' to avoid conflicts with ctags\nTAGS_PATH = ROOT / 'registered_tags'\nISSUE_FNAME = 'issue'\nTIMESTAMP_FMT = '%Y-%m-%dT%H:%M:%S%z'\nCONFIG_PATH = Path.home() / '.config' / 'ishu.conf'\n\n\n# == Filesystem handlers ==\n\ndef user_path(user: str) -> Path:\n return ROOT / f'user-{user}'\n\n\ndef user_paths() -> Iterable[Path]:\n return ROOT.glob('user-*')\n\n\ndef usernames() -> Iterable[str]:\n return [f.name.split('-', 1)[1] for f in user_paths()]\n\n\ndef issue_path(user: str, id_: int) -> Path:\n return user_path(user) / f'issue-{id_}' / ISSUE_FNAME\n\n\ndef comment_paths(user: str, id_: int) -> Iterable[Path]:\n return issue_path(user, id_).parent.glob('comment-*')\n\n\n# == Config ==\n\nclass IncompleteConfigException(Exception):\n pass\n\n\nclass InvalidConfigException(Exception):\n pass\n\n\nclass Config:\n settings = frozenset(['user', 'aliases'])\n editable_settings = frozenset(['user'])\n\n def __init__(self, user: str) -> None:\n self.user = self._validate_user(user)\n self.aliases: Dict[str, str] = {}\n\n def _validate_user(self, user: str) -> str:\n if not re.fullmatch(r'[a-zA-Z]+', user):\n raise InvalidConfigException(\n 'username can only consist of a-z and A-Z'\n )\n return user\n\n def __getitem__(self, key: str) -> Any:\n if key == 'user':\n return self.user\n else:\n raise KeyError('No such setting')\n\n def __setitem__(self, key: str, value: Any) -> None:\n if key == 'user':\n self.user = self._validate_user(value)\n else:\n raise KeyError('No such setting')\n\n @classmethod\n def load(cls) -> 'Config':\n data: Dict[str, Any] = json.loads(CONFIG_PATH.read_text())\n cfg = Config(user=data['user'])\n if 'aliases' in data:\n cfg.aliases = data['aliases']\n return cfg\n\n def save(self) -> None:\n if not CONFIG_PATH.parent.exists():\n CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)\n data: Dict[str, Any] = {\n 'user': self.user,\n 'aliases': self.aliases,\n }\n CONFIG_PATH.write_text(json.dumps(data, indent=2))\n","sub_path":"ishu/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"405519050","text":"from django.test import TestCase\n\nfrom mainapp.models import Product, ProductCategory\n\n\nclass ProductsTestCase(TestCase):\n def setUp(self):\n category = ProductCategory.objects.create(name=\"колбасы\")\n self.product_1 = Product.objects.create(\n name=\"колбаса 1\",\n category=category,\n price=1999.5,\n quantity=150\n )\n self.product_2 = Product.objects.create(\n name=\"колбаса 2\",\n category=category,\n price=2998.1,\n quantity=125,\n is_active=False\n )\n self.product_3 = Product.objects.create(\n name=\"колбаса 3\",\n category=category,\n price=998.1,\n quantity=115\n )\n\n def test_product_get(self):\n product_1 = Product.objects.get(name=\"колбаса 1\")\n product_2 = Product.objects.get(name=\"колбаса 2\")\n self.assertEqual(product_1, self.product_1)\n self.assertEqual(product_2, self.product_2)\n\n def test_product_print(self):\n product_1 = Product.objects.get(name=\"колбаса 1\")\n product_2 = Product.objects.get(name=\"колбаса 2\")\n self.assertEqual(str(product_1), 'колбаса 1 (колбасы)')\n self.assertEqual(str(product_2), 'колбаса 2 (колбасы)')\n\n def test_product_get_items(self):\n product_1 = Product.objects.get(name=\"колбаса 1\")\n product_3 = Product.objects.get(name=\"колбаса 3\")\n products = Product.get_items()\n\n self.assertEqual(list(products), [product_1, product_3])","sub_path":"mainapp/tests_products.py","file_name":"tests_products.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"559115094","text":"# -*- coding: utf-8 -*-\n#\n# Part of Open311-NGSI integration tool\n# Author: Juho Vuori (juho.vuori@codento.com)\n# Copyright: Forum Virium Helsinki\n#\n# http://www.forumvirium.fi/\n#\n\norionContextBrokerURL = \"http://130.206.82.148:1234/\"\nopen311URL = 'http://asiointi.hel.fi/palautews/rest/v1/'\n\n","sub_path":"open3112ngsi/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"297143934","text":"from typing import List\n\n\nclass Solution:\n    def exist(self, board: List[List[str]], word: str) -> bool:\n        self.ROWS = len(board)\n        self.COLS = len(board[0])\n        self.board = board\n        \n        for row in range(self.ROWS):\n            for col in range(self.COLS):\n                if self.backtrack(row, col, word):\n                    return True\n        \n        # no match found after all exploration\n        return False\n    \n    def backtrack(self,row, col,suffix):\n        if len(suffix)==0:\n            return True\n        if row<0 or row>=len(self.board) or col<0 or col>= len(self.board[0]) or self.board[row][col]!=suffix[0]:\n            return False\n        self.board[row][col]='#'\n        for rowOffset, colOffset in [(0,1),(1,0),(0,-1),(-1,0)]:\n            ret = self.backtrack(row+rowOffset,col+colOffset,suffix[1:])\n            if ret:\n                break\n        self.board[row][col] = suffix[0]\n        return ret\n","sub_path":"Problem79_Word_Search.py","file_name":"Problem79_Word_Search.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"74701701","text":"import cv2\r\nimport os\r\nimport numpy\r\n\r\n\r\ndef contrast_limited_adaptive_HE(channel_img):\r\n    assert(len(channel_img.shape) == 2)\r\n    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(\r\n        8, 8))  # Create CLAHE Object\r\n    clahe_image = clahe.apply(numpy.array(channel_img, dtype='uint8'))\r\n\r\n    return clahe_image\r\n\r\n\r\ndef image_preprocessing(img):\r\n    img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)\r\n    l, a, b = cv2.split(img)\r\n    l = contrast_limited_adaptive_HE(l)\r\n\r\n    processed_image = cv2.merge((l, a, b))\r\n    processed_image = cv2.cvtColor(processed_image, cv2.COLOR_LAB2RGB)\r\n\r\n    return processed_image\r\n\r\n\r\ndef preprocess(fname):\r\n    images = []\r\n\r\n    Mask = numpy.zeros((600, 600), dtype='uint8')\r\n    Mask[Mask.shape[0]//2][Mask.shape[1]//2] = 255\r\n    GLCMask = cv2.dilate(Mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,\r\n                         (Mask.shape[0]-30, Mask.shape[1]-30)), iterations=1)\r\n    _img = cv2.imread(fname)\r\n    _img = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB)\r\n    _img = cv2.resize(_img, (600, 600))\r\n    _img = cv2.bitwise_and(_img, _img, mask=GLCMask)\r\n    _img = cv2.resize(_img, (384, 384))\r\n    _img = image_preprocessing(_img)\r\n\r\n    images.append(_img)\r\n    images = numpy.array(images, dtype='uint8')\r\n    return images\r\n","sub_path":"ear/modules/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"216375880","text":"from pico2d import *\nimport math\nimport game_world\nimport game_framework\n\nclass fire:\n lazer_start = None\n lazer_middle = None\n lazer_last = None\n\n def __init__(self, x = 0, y = 0, radians = 0.0, lazer_part = 0, show = 0):\n self.x = x\n self.y = y\n self.radians = radians\n self.lazer_part = lazer_part\n self.count = show\n self.showtime = get_time()\n if fire.lazer_start is None:\n fire.lazer_start = load_image('bullet_image//lazer_start.png') # 36 92\n if fire.lazer_middle is None:\n fire.lazer_middle = load_image('bullet_image//lazer_middle.png') # 100 100\n if fire.lazer_last is None:\n fire.lazer_last = load_image('bullet_image//lazer_last.png') # w 48 h 100\n\n def update(self):\n if self.x < 0 or self.x > 800 or self.y < 0 or self.y > 700:\n game_world.remove_object(self)\n if self.count == 0:\n self.x = self.x + 400*math.cos(self.radians)*game_framework.frame_time\n self.y = self.y + 400*math.sin(self.radians)*game_framework.frame_time\n for gets in game_world.enemy_objects():\n if gets.hp >= 0:\n if math.sqrt((gets.x - self.x) * (gets.x - self.x) + (gets.y - self.y) * (gets.y - self.y)) < gets.size - 40:\n game_world.remove_object(self)\n gets.hp -= 3\n break\n else:\n if self.count < get_time() - self.showtime:\n self.count = 0\n\n\n\n def draw(self):\n if self.count == 0:\n if self.lazer_part == 0:\n self.lazer_start.clip_composite_draw(0, 0, 36, 92, self.radians, 'hv', self.x, self.y, 23, 50)\n elif self.lazer_part == 1:\n self.lazer_middle.clip_composite_draw(0, 0, 100, 100, self.radians, '', self.x, self.y, 27, 50)\n elif self.lazer_part == 2:\n self.lazer_last.clip_composite_draw(0, 0, 48, 100, self.radians, 'hv', self.x, self.y, 25, 50)\n\n\n","sub_path":"2DGP project/2DGP 게임 제작/green_attack.py","file_name":"green_attack.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"176547489","text":"\n\nfrom xai.brain.wordbase.nouns._irrelevance import _IRRELEVANCE\n\n#class header\nclass _IRRELEVANCES(_IRRELEVANCE):\n\tdef __init__(self):\n\t\t_IRRELEVANCE.__init__(self)\n\t\tself.name = \"IRRELEVANCES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"irrelevance\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_irrelevances.py","file_name":"_irrelevances.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"106654744","text":"import numpy as np\r\nimport cv2\r\nimport sys\r\n \r\n# ------------------------------------------------------------------------------------------------------\r\n# Function: Main Program\r\n# Inputs: \r\n# return: \r\n# ------------------------------------------------------------------------------------------------------\r\ndef main():\r\n video_capture = cv2.VideoCapture('http://admin:@192.168.10.1/media/?action=stream')\r\n \r\n while(True):\r\n # Capture the frames\r\n grabbed, frame = video_capture.read()\r\n width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH )\r\n height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT )\r\n\r\n #Display the resulting frame\r\n cv2.imshow('frame',frame)\r\n\r\n # Handle user keyboard inputs\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n video_capture.release()\r\n cv2.destroyAllWindows()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"AICODE/2_IPCam.py","file_name":"2_IPCam.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"595395614","text":"from flask_restplus import Namespace, fields\n\n\nclass ChoiceValue(fields.Raw):\n    \"\"\"\n    Custom field to transform the choices field\n    so as to display its value repr instead\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        self.min_length = kwargs.pop('min_length', None)\n        self.max_length = kwargs.pop('max_length', None)\n        self.pattern = kwargs.pop('pattern', None)\n        # forward the remaining kwargs (required, description, attribute, ...) to fields.Raw\n        super().__init__(*args, **kwargs)\n\n    def schema(self):\n        return {\n            'minLength': self._v('min_length'),\n            'maxLength': self._v('max_length'),\n            'pattern': self._v('pattern'),\n        }\n\n    def format(self, choice):\n        return choice.value\n\n\nclass JobPostDto:\n    api = Namespace('job_post', description='job post related operations')\n    job_post = api.model('job_post', {\n        'category': ChoiceValue(required=True, description='Job Category', attribute='category'),\n        'title': fields.String(required=True, description='Job Title'),\n        'description': fields.String(required=True, description='Job Description'),\n        'company_name': fields.String(required=True, description='Company Name'),\n        'region': fields.String(required=True, description='Region'),\n        'company_url': fields.String(required=True, description='Company Url'),\n        'created_on': fields.DateTime(description='Created On'),\n        'id': fields.Integer(description='Id'),\n        'updated_on': fields.DateTime(description='Updated On')\n    })\n","sub_path":"app/main/util/dto.py","file_name":"dto.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"276463092","text":"#!/usr/bin/env python\n\n__author__ = \"Elisa Londero\"\n__email__ = \"elisa.londero@inaf.it\"\n__date__ = \"September 2019\"\n\nimport pymysql\nfrom sqlalchemy import Column\nfrom sqlalchemy import String\nfrom sqlalchemy import Integer\nfrom utilities import LoggingClass\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\nlog = LoggingClass('',True).get_logger()\n\nclass MySQLDatabase(object):\n    def __init__(self, user, pwd, dbname, host='localhost', port='3306'):\n        self.user = user\n        self.pwd = pwd\n        self.host = host\n        self.port = port\n        self.dbname = dbname\n        self._session = None\n\n    def _create_session(self):\n        # cache the session so later calls (and close_session) operate on the same object\n        if self._session is not None:\n            return self._session\n        sdb = 'mysql+pymysql://%s:%s@%s:%s/%s'%(self.user,self.pwd,self.host,self.port,self.dbname)\n        try:\n            engine = create_engine(sdb)\n            db_session = sessionmaker(bind=engine)\n            self._session = db_session()\n            return self._session\n        except Exception as e:\n            msg = \"Database session creation exception - MySQLDatabase._create_session -- \"\n            log.error(\"{0}{1}\".format(msg,e))\n\n    def _validate_session(self):\n        try:\n            # opening a connection raises if the database is unreachable\n            self._create_session().connection()\n            return True\n        except Exception as e:\n            msg = \"Database session validation exception - MySQLDatabase._validate_session -- \"\n            log.error(\"{0}{1}\".format(msg,e))\n            return False\n\n    def mysql_session(self):\n        Session = self._validate_session()\n        if Session:\n            return self._create_session() \n        else:\n            exit(1)\n\n    def close_session(self):\n        try:\n            self._create_session().close()\n            self._session = None\n            return True\n        except Exception as e: \n            msg = \"Database session closing exception - MySQLDatabase.close_session -- \"\n            log.error(\"{0}{1}\".format(msg,e))\n            return False\n\nclass DataFile(Base):\n    __tablename__ = 'PRS'\n\n    id = Column(Integer, primary_key=True)\n    file_name = Column(String(255))\n\n    def __init__(self, file_name):\n        self.file_name = file_name\n\nclass Queries(object):\n    def __init__(self, session, table_object, string):\n        self.session = session\n        self.table_object = table_object\n        self.string = string\n\n    def match_filename(self):\n        try:\n            rows = self.session.query(self.table_object)\n            flt = rows.filter(self.table_object.file_name == self.string)\n            for j in flt:\n                if j.file_name:\n                    return True\n                else:\n                    return False\n        except Exception as e:\n            msg = \"Match filename string exception - Queries.match_filename -- \"\n            log.error(\"{0}{1}\".format(msg,e))\n","sub_path":"captures_preprocessor/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"45274841","text":"import net\nfrom algopy import UTPM\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow import keras\nimport alm\nfrom matplotlib import pyplot\n\nW,N = 8,20\ndelta = 0.1\nrng = np.random.default_rng()\n\nx = np.linspace(0,np.pi,N).reshape((N,1))\n#y = -np.sin(.8*np.pi*x)+rng.normal(0,delta,x.shape)\ny = np.sin(x*x)+rng.normal(0,delta,x.shape)\n\ni = rng.permutation(N)\ni_train,i_val = i[:int(.8*N)],i[int(.8*N):]\n\nsigma = lambda x: tf.math.tanh(x)\nsigma_ = lambda x: 2/(np.cosh(2*x)+1)\n\ntau = lambda x: x\ntau_ = lambda x: np.ones(x.shape)\n\n\n\nmodel = keras.Sequential()\nmodel.add(alm.Dense_d(activation=sigma,activation_=sigma_,units=W,input_shape=(x.shape[1],)))\nmodel.add(alm.Dense_d(activation=sigma,activation_=sigma_,units=W))\nmodel.add(alm.Dense_d(activation=tau,activation_=tau_,units=y.shape[1]))\nsgd = keras.optimizers.Adam(learning_rate=.01)\nes = keras.callbacks.EarlyStopping(monitor='loss',patience=10)\nmodel.compile(optimizer=sgd, loss='mean_squared_error')\n\nw = model.get_weights()\n\nhist = model.fit(x[i_train,:],y[i_train,:],batch_size=i_train.size,validation_data=(x[i_val,:],y[i_val,:]),epochs=2000,callbacks=[es],verbose=0)\nprint(len(hist.history['loss']),hist.history['loss'][-1])\nprint(hist.history.keys())\n\nx_sm = np.linspace(0,np.pi,1000).reshape((1000,1))\ny_adam = model(x_sm)\n\nmodel.set_weights(w)\n\nalmnet = alm.ALMModel(model, x[i_train,:], y[i_train,:])\nhist2 = almnet.fit_alm(val_data=(x[i_val,:],y[i_val,:]))\n\n\npyplot.plot(x,y,'k+',x_sm,y_adam,'b-',x_sm,almnet.model(x_sm),'r-')\n\n#epochs = np.arange(hist2['tol'].size)\n#pyplot.semilogy(epochs,hist2['tol'],epochs,hist2['loss'],epochs,hist2['val_loss'])\npyplot.show()\n","sub_path":"python/kerastest2.py","file_name":"kerastest2.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"26654502","text":"#! /usr/bin/env python3\n\nfrom os import environ\nimport sys\nimport yaml\n\nfrom lsb_release import get_distro_information\n\nros_release = environ[\"TUE_ROS_DISTRO\"]\nubuntu_release = get_distro_information()[\"CODENAME\"]\n\n\ndef show_error(error):\n print(\"ERROR: {0}\".format(error))\n return 1\n\n\ndef main():\n if not 2 <= len(sys.argv) <= 3:\n return show_error(\"Usage: parse-install-yaml install.yaml [--now]\")\n\n now = False\n if len(sys.argv) == 3:\n if sys.argv[2] == \"--now\":\n now = True\n else:\n return show_error(\"Unknown option: {0}\".format(sys.argv[2]))\n\n with open(sys.argv[1]) as f:\n try:\n install_items = yaml.load(f, yaml.CSafeLoader)\n except AttributeError:\n install_items = yaml.load(f, yaml.SafeLoader)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:\n return show_error(\"Invalid yaml syntax: {0}\".format(e))\n\n if not isinstance(install_items, list):\n return show_error(\"Root of install.yaml file should be a YAML sequence\")\n\n commands = []\n\n # Combine now calls\n now_cache = {\n \"system-now\": [],\n \"pip-now\": [],\n \"pip2-now\": [],\n \"pip3-now\": [],\n \"ppa-now\": [],\n \"snap-now\": [],\n }\n\n for install_item in install_items:\n command = None\n\n try:\n install_type = install_item[\"type\"]\n\n if install_type == \"empty\":\n return 0\n\n elif install_type == \"ros\":\n if \"source\" in install_item:\n source = install_item[\"source\"]\n else:\n if ros_release in install_item:\n source = install_item[ros_release][\"source\"]\n elif \"default\" in install_item:\n source = install_item[\"default\"][\"source\"]\n else:\n return show_error(\n \"ROS distro {} or 'default' not specified in install.yaml\".format(ros_release)\n )\n # Both release and default are allowed to be None\n if source is None:\n continue\n\n source_type = source[\"type\"]\n if source_type == \"git\":\n sub_dir = source.get(\"sub-dir\", \".\")\n\n command = \"tue-install-ros {0} {1} {2}\".format(source_type, source[\"url\"], sub_dir)\n if \"version\" in source:\n command += \" {0}\".format(source[\"version\"])\n elif source_type == \"system\":\n command = \"tue-install-ros system {0}\".format(source[\"name\"])\n else:\n return show_error(\"Unknown ROS install type: '{0}'\".format(source_type))\n\n elif install_type == \"git\":\n command = \"tue-install-{0} {1} {2}\".format(install_type, install_item[\"url\"], install_item[\"path\"])\n if \"version\" in install_item:\n command += \" {0}\".format(install_item[\"version\"])\n\n elif install_type in [\n \"target\",\n \"system\",\n \"pip\",\n \"pip2\",\n \"pip3\",\n \"ppa\",\n \"snap\",\n \"dpkg\",\n \"target-now\",\n \"system-now\",\n \"pip-now\",\n \"pip2-now\",\n \"pip3-now\",\n \"ppa-now\",\n \"snap-now\",\n ]:\n if now and \"now\" not in install_type:\n install_type += \"-now\"\n\n if \"name\" in install_item:\n pkg_name = install_item[\"name\"]\n else:\n if ubuntu_release in install_item:\n pkg_name = install_item[ubuntu_release][\"name\"]\n elif \"default\" in install_item:\n pkg_name = install_item[\"default\"][\"name\"]\n else:\n return show_error(\n \"Ubuntu distro {} or 'default' not specified in install.yaml\".format(ubuntu_release)\n )\n # Both release and default are allowed to be None\n if pkg_name is None:\n continue\n\n # Cache install types which accept multiple pkgs at once\n if install_type in now_cache:\n now_cache[install_type].append(pkg_name)\n continue\n\n command = \"tue-install-{0} {1}\".format(install_type, pkg_name)\n\n else:\n return 
show_error(\"Unknown install type: '{0}'\".format(install_type))\n\n except KeyError as e:\n return show_error(\"Invalid install.yaml file: Key {0} could not be found.\".format(e))\n\n if not command:\n return show_error(\"Invalid install.yaml file\")\n\n command = command.replace(\" \", \"^\")\n commands.append(command)\n\n for install_type, pkg_list in now_cache.items():\n if pkg_list:\n command = \"tue-install-{0} {1}\".format(install_type, \" \".join(pkg_list))\n command = command.replace(\" \", \"^\")\n commands.append(command)\n\n commands = \" \".join(commands)\n\n print(commands)\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"installer/parse-install-yaml.py","file_name":"parse-install-yaml.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"434961745","text":"\"\"\"Defines utilities for pytorch.\"\"\"\nimport contextlib\nimport operator\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterable, Iterator, Optional, Sequence, Union, cast\n\nimport torch\nimport torch.nn as nn\nfrom spacy.pipeline import Pipe\nfrom spacy.tokens import Doc\nfrom torch._C import is_grad_enabled # type: ignore\n\n# the type torch.optim.Optimizer uses\nOptimizerParameters = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]\n\n\nclass TorchPipe(Pipe):\n def __init__(self, vocab, model=True, **cfg):\n self.vocab = vocab\n self.model = model\n self._device = torch.device(\"cpu\")\n self.cfg = cfg\n\n @property\n def device(self):\n if not hasattr(self, \"_device\"):\n self._device = torch.device(\"cpu\")\n return self._device\n\n def to(self, device: torch.device):\n self._device = device\n if self.model and not isinstance(self.model, bool):\n self.model.to(device)\n\n def optim_parameters(self) -> OptimizerParameters:\n \"\"\"Return parameters to be optimized.\"\"\"\n self.require_model()\n if self.cfg.get(\"freeze\"):\n return []\n return cast(nn.Module, self.model).parameters()\n\n\n@dataclass\nclass TensorWrapper:\n \"\"\"Pytorch tensor wrapper for efficient handling of part of batch tensors in spacy pipeline\"\"\"\n\n batch_tensor: torch.Tensor\n i: int\n length: Optional[int] = None\n\n def get(self) -> torch.Tensor:\n if self.length is not None:\n return self.batch_tensor[self.i, : self.length]\n return self.batch_tensor[self.i]\n\n\nGoldCat = Dict[str, float]\n\n\ndef goldcat_to_label(goldcat: GoldCat) -> str:\n assert len(goldcat)\n return max(goldcat.items(), key=operator.itemgetter(1))[0]\n\n\ndef goldcats_to_tensor(\n goldcats: Iterable[GoldCat], label2id: Dict[str, int]\n) -> torch.Tensor:\n ids = [label2id[goldcat_to_label(cat)] for cat in goldcats]\n return torch.tensor(ids)\n\n\nTORCH_LOSS = \"torch_loss\"\n\n\ndef get_loss_from_docs(docs: Iterable[Doc]) -> torch.Tensor:\n _losses = (doc.user_data.get(TORCH_LOSS) for doc in docs)\n losses = [loss for loss in _losses if isinstance(loss, torch.Tensor)]\n if not losses:\n raise ValueError(\"loss is not set to docs.\")\n tlosses = torch.stack(losses)\n return torch.sum(tlosses)\n\n\ndef add_loss_to_docs(docs: Sequence[Doc], loss: torch.Tensor):\n \"\"\"Add loss to docs' existing loss. \"\"\"\n doc = docs[0]\n if TORCH_LOSS in doc.user_data:\n doc.user_data[TORCH_LOSS] += loss\n else:\n doc.user_data[TORCH_LOSS] = loss\n\n\n@contextlib.contextmanager\ndef set_grad(grad: bool) -> Iterator[None]:\n prev = is_grad_enabled()\n torch.set_grad_enabled(grad)\n yield\n torch.set_grad_enabled(prev)\n","sub_path":"camphr/torch_utils.py","file_name":"torch_utils.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"403182432","text":"import io,os\nfrom igf_data.process.metadata_reformat.reformat_samplesheet_file import Reformat_samplesheet_file\n\ndef convert_file_to_stream(infile):\n  try:\n    data = ''\n    with open(infile,'r') as file_i:\n      with io.StringIO() as file_o:\n        for line in file_i:\n          file_o.write(line)\n\n        data = file_o.getvalue()\n    return data\n  except Exception as e:\n    raise ValueError('Failed to convert file to stream, error: {0}'.format(e))\n\ndef run_samplesheet_reformatting(samplesheet_file,output_dir,revcomp_index1,\n                                 revcomp_index2,remove_adapters):\n  try:\n    csv_data = ''\n    samplesheet_output = \\\n      os.path.join(\\\n        output_dir,\n        'reformatted_samplesheet.csv')\n    re_samplesheet = \\\n      Reformat_samplesheet_file( \\\n        infile=samplesheet_file,\n        revcomp_index1=revcomp_index1,\n        revcomp_index2=revcomp_index2,\n        remove_adapters=remove_adapters)\n    re_samplesheet.\\\n      reformat_raw_samplesheet_file(\\\n        output_file=samplesheet_output)\n    csv_data = \\\n      convert_file_to_stream(infile=samplesheet_output)\n    return csv_data\n  except Exception as e:\n    raise ValueError('Failed running samplesheet reformatting, error: {0}'.format(e))","sub_path":"app/samplesheet/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"587102251","text":"import os\nimport sys\nfrom PyQt5.QtCore import QStringListModel\n# Column info produced after reading and parsing the file \n_parameters_file_columns_info_old = [ \n    'Group', # key column\n    'Code#' , \n    'AtValue',\n    'TitleIndex', \n    'ParaVar', \n    'KpdFunc',\n    '공장설정값',\n    '최대값', \n    '최소값', \n    '폼메시지',\n    '단위', \n    'Attribute', \n    'ShowVar',\n    'ShowVal',\n    'MaxEDS', \n    'MinEDS',\n    '설명' \n]\n# Column info produced after parsing a version 4 (or higher) file \n_parameters_file_columns_info = [\n    'Group', # key column; only a plain 'all' value comes in \n    'Name',\n    'TitleIndex',\n    'AtValue',\n    'FloatScale', \n    'Uint16Scale', \n    '공장설정값',\n    '최대값', \n    '최소값', \n    '읽기전용',\n    '운전중변경불가', \n    '0입력가능', \n    '통신쓰기금지', \n    '폼메시지', \n    '단위', \n    '16bit주소',\n    '32bit주소',\n    '설명'\n]\n# Column info shown in the table view \n_parameters_view_columns_info = [\n    'Group', # hidden column; key column used when the left column is clicked \n    'Code#', # used for old tables; hidden when a new table is read \n    'Name',\n    'TitleIndex', \n    'Title',\n    'AtValue',\n    'FloatScale', \n    'Uint16Scale', \n    '공장설정값',\n    '최대값', \n    '최소값', \n    '읽기전용',\n    '운전중변경불가', \n    '0입력가능', \n    '통신쓰기금지', \n    '폼메시지', \n    '단위', \n    '16bit주소', \n    '32bit주소', \n    '설명'\n]\n# table editor version 4\n_group_columns_info = [\n    'Dummy Key', # filtering is not used here as it is for parameters, but the shared helper functions expect this column, so a dummy is added\n    'Group'\n    # 'GroupSize'\n]\n# table editor version 2, 3\n_group_columns_info_old = [\n    'Dummy Key', # filtering is not used here as it is for parameters, but the shared helper functions expect this column, so a dummy is added\n    'Group', \n    '보임값', \n    '보임비교값'\n]\n\n# Describes the index layout of the tuples yielded by read_data \n_title_columns_info = [\n    'Dummy Key', # filtering is not used here as it is for parameters, but the shared helper functions expect this column, so a dummy is added\n    'Title',\n    'Enum 이름',\n    'TitleIndex',\n    'Data'\n]\n_data_storage_columns_info = [\n    'Name', \n    'FloatScale',\n    'WordScale'\n]\n_variable_columns_info = [\n    'Dummy Key', # filtering is not used here as it is for parameters, but the shared helper functions expect this column, so a dummy is added\n    'Variable',\n    'Type',\n    'Description'\n]\n_msg_info_columns_info = [\n    'Dummy Key', # filtering is not used here as it is for parameters, but the shared helper functions expect this column, so a dummy is added\n    'MsgName',\n    'MsgComment'\n]\n\n_msg_values_columns_info = [\n    'MsgName', # hidden column \n    'MsgComment', # hidden column \n    'TitleIndex', \n    'Title', \n    'AtValue' \n]\n\n# Message list definitions per unit \n_unit_with_msg = {\n    'U_HZ_RPM': QStringListModel(['F_NOT_TITLE_CHANGE', 'F_TITLE_CHANGE', 'F_NOT_TITLE_CHANGE_SIG', 'F_TITLE_CHANGE_SIG']),\n    'U_B':QStringListModel([ 'F_BIT'+ str(cnt) for cnt in range(2, 17) ] ), \n    'Other': QStringListModel([ *['F_DEX' + str(cnt) for cnt in range(0, 5)], \n                                *['F_SIG' + str(cnt) for cnt in range(0, 5)], \n                                'F_HEX4', 'F_HEX8', 'F_TIME_MIN', 'F_TWO', \n                                'F_YMDHM', 'F_RYMDHM', 'F_VER'] )\n}\n\n\ndef para_col_info_for_file_old():\n    return _parameters_file_columns_info_old\n\ndef para_col_info_for_file():\n    return _parameters_file_columns_info\n\ndef para_col_info_for_view():\n    return _parameters_view_columns_info\n    \ndef data_storage_columns_info():\n    return _data_storage_columns_info\n\ndef title_col_info():\n    return _title_columns_info\n    \ndef variable_col_info():\n    return _variable_columns_info\n\ndef msg_values_col_info():\n    return _msg_values_columns_info \n\ndef msg_info_col_info():\n    return _msg_info_columns_info\n\ndef group_col_info():\n    return _group_columns_info\n\ndef group_col_info_old():\n    return _group_columns_info_old\n\ndef unit_with_msg():\n    return _unit_with_msg","sub_path":"column_info.py","file_name":"column_info.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"240994620","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\n\nimport os\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nfrom itertools import combinations\nfrom time import sleep\n\nclass Plagiarism_Checker():\n    '''\n    Use FuzzyWuzzy, which implements the Levenshtein Distance algorithm, to\n    detect whether two code files are similar.\n    '''\n    def __init__(self):\n        self.file_type = ['python','c','cpp','java','txt']\n        self.ratio_boundary = 60\n        self.report = []\n        print(\"ratio_boundary = {}\".format(self.ratio_boundary))\n\n    def two_two_combine(self, file_list):\n        '''\n        As the name suggests, pair up every two files\n        '''\n        # Too many elements there and the Sublime IDE crashes once it displays them all\n        #return list(combinations(file_list,2))\n        return combinations(file_list,2)\n\n\n    def check_folder(self, path):\n        '''\n        Check the files in this folder for plagiarism\n        and list out the ranked file names.\n        '''\n        all_group = self.classify_files_by_type(path)\n        print(\"DEBUG all_group: {}\".format(all_group))\n        self.report = []\n\n        sim_ratio = None\n        for group in all_group:\n            if len(group) > 1:\n                combined_list = self.two_two_combine(group)\n                for pair in combined_list:\n                    sim_ratio = self.check_plagiarism(os.path.join(path,pair[0]),\\\n                        os.path.join(path,pair[1]))\n                    sub_re = (pair[0], pair[1], sim_ratio)\n                    self.report.append(sub_re)\n                    #print(\"{:<40} {:<40} {:<20}\".format(*sub_re))\n                    #sleep(0.2)\n\n        #print(\"DEBUG report: {}\".format(report)) \n        return [x for x in self.report if x[2] >= self.ratio_boundary]\n\n\n    def get_top_sim(self, n):\n        return sorted(self.report, key=lambda x:x[2], reverse=True)[:n]\n\n\n    def classify_files_by_type(self, path):\n        '''\n        Put files into different groups according to their types\n        '''\n        if not os.path.exists(path):\n            return None\n\n        all_group = []\n        python_list = []\n        c_cpp_list = []\n        java_list = []\n        text_list = []\n\n        for item in os.listdir(path):\n            file_path = os.path.join(path, item)\n            if os.path.isfile(file_path):\n                if item.endswith('.py'):\n                    python_list.append(item)\n                elif item.endswith('.java'):\n                    java_list.append(item)\n                elif item.endswith('.c') or item.endswith('.cpp'):\n                    c_cpp_list.append(item)\n                elif item.endswith('.txt'):\n                    text_list.append(item)\n                elif item.endswith('.exe') or '.' not in item:\n                    continue\n                else:\n                    continue\n                    #text_list.append(item)\n\n\n        all_group.append(python_list)\n        all_group.append(c_cpp_list)\n        all_group.append(java_list)\n        all_group.append(text_list)\n\n        return all_group\n\n\n    def check_plagiarism(self, file_a, file_b):\n\n        str_a = None\n        str_b = None\n        sim_ratio = None\n        try:\n            with open(file_a, 'r') as fa:\n                str_a = fa.read()\n\n            with open(file_b, 'r') as fb:\n                str_b = fb.read()\n        except Exception as e:\n            print(\"error read file, {}\".format(e))\n            return False\n\n        sim_ratio = self.get_similarity_ratio(str_a, str_b)\n        #print(\"DEBUG ratio = {}\".format(sim_ratio))\n\n        return sim_ratio\n\n\n    def check_file(self, file_a, file_b):\n\n        sim_ratio = self.check_plagiarism(file_a, file_b)\n        if sim_ratio > self.ratio_boundary:\n            return True\n        else:\n            return False\n\n\n    def get_similarity_ratio(self, str_a, str_b):\n        return fuzz.ratio(str_a, str_b)\n\n\n    def rank_file_by_ratio(self):\n        '''\n        List file pairs ranked from highest to lowest similarity ratio\n        '''\n        pass\n\n###########Class Plagiarism_Checker()##########################\n\n\nif __name__ == '__main__':\n\n    file_a = r'D:\\\\My_Project\\\\Code_Jam\\\\2018_codejam\\\\2018_7\\\\Q1\\\\exe\\\\SherryRen_q1(3.4).py'\n    file_b = r'D:\\\\My_Project\\\\Code_Jam\\\\2018_codejam\\\\2018_7\\\\Q1\\\\exe\\\\tracy Lou_py3_win64_q1.py'\n    file_b = r'D:\\\\My_Project\\\\Code_Jam\\\\2018_codejam\\\\2018_7\\\\Q1\\\\exe\\\\SherryRen_Copy.py'\n    path = r'D:\\\\My_Project\\\\Code_Jam\\\\2018_codejam\\\\2018_7\\\\Q1\\\\exe'\n    path = r'D:\\\\My_Project\\\\Code_Jam\\\\2018_codejam\\\\2018_7\\\\Q2\\\\exe'\n\n    pc = Plagiarism_Checker()\n    ''' \n    if pc.check_file(file_a, file_b):\n        print(\"DEBUG These two files are similar!\")\n    else:\n        print(\"Debug No problem.\")\n    '''\n\n\n    sim_pairs = pc.check_folder(path)\n    print(\"sim_pairs = {}\".format(sim_pairs))\n    print(pc.get_top_sim(10))\n\n    print(\"Done.\")\n    ","sub_path":"2018_7/Q1/plagiarism_checker.py","file_name":"plagiarism_checker.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"458604818","text":"# Import global settings to make it easier to extend settings.\n\nfrom .paths import PROJECT_DIR, PROJECT_NAME, VAR_ROOT, E\nimport os\n\n# ==============================================================================\n# Generic Django project settings\n# ==============================================================================\nDEBUG = E.DEBUG or False\nTEMPLATE_DEBUG = DEBUG\n\nTIME_ZONE = 'GMT'\nUSE_I18N = True\nSITE_ID = 1\n\nSECRET_KEY = E.SECRET_KEY\n\nVAR_ROOT = E.VAR_ROOT or VAR_ROOT ## Var root can be overridden\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-US'\n\nADMINS = (\n    ('Administrator', E.ADMIN_EMAIL or \"admin@unholster.com\"),\n)\n\nMANAGERS = ADMINS\n\nALLOWED_HOSTS = '*'\n\n# ==============================================================================\n# Project URLS and media settings\n# ==============================================================================\n\nROOT_URLCONF = PROJECT_NAME + '.conf.urls'\n\nLOGIN_URL = 'login'\nLOGOUT_URL = 'logout'\nLOGIN_REDIRECT_URL = '/'\n\n\n# =============================================================================\n# Static files and frontend\n# =============================================================================\nFRONT_BUILD_DIR = 'static'\nSTATIC_DOMAIN = E.STATIC_DOMAIN\nSTATIC_URL = E.STATIC_URL or ('/static/%s/' % PROJECT_NAME)\n\nSTATIC_ROOT = E.STATIC_ROOT or os.path.join(VAR_ROOT, 'static')\n\nSTATICFILES_DIRS = (\n    os.path.join(PROJECT_DIR, PROJECT_NAME, FRONT_BUILD_DIR),\n)\n\nSTATICFILES_FINDERS = (\n    'django.contrib.staticfiles.finders.FileSystemFinder',\n    'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nMEDIA_URL = E.MEDIA_URL or ('/uploads/%s/' % PROJECT_NAME)\nMEDIA_ROOT = E.MEDIA_ROOT or os.path.join(VAR_ROOT, 'uploads')\n\n\n# ==============================================================================\n# Middlewares\n# ==============================================================================\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\n# ==============================================================================\n# Templates\n# ==============================================================================\nTEMPLATES = [\n    {\n        'BACKEND': 'django.template.backends.django.DjangoTemplates',\n        'DIRS': [os.path.join(PROJECT_DIR, PROJECT_NAME, 'templates')],\n        'APP_DIRS': True,\n        'OPTIONS': {\n            'context_processors': [\n                'django.template.context_processors.debug',\n                'django.template.context_processors.request',\n                'django.contrib.auth.context_processors.auth',\n                'django.contrib.messages.context_processors.messages',\n            ],\n        },\n    },\n]\n\n# =============================================================================\n# Databases\n# =============================================================================\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql',\n        'NAME': E.DATABASE_NAME,\n        'USER': E.DATABASE_ROLE,\n        'PASSWORD': E.DATABASE_PASSWORD,\n        'HOST': E.DATABASE_HOST,\n        'PORT': E.DATABASE_PORT or '5432',\n    }\n}\n\n# 
=============================================================================\n# Caching\n# =============================================================================\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': E.CACHE_URL or 'redis://localhost:6379/0',\n }, 'staticfiles': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'staticfiles',\n }, 'template_cache': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': E.TEMPLATE_CACHE_URL or 'redis://localhost:6379/9',\n }\n}\n\n# =============================================================================\n# Apps\n# =============================================================================\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.humanize',\n 'django.contrib.staticfiles',\n 'password_reset',\n 'channels',\n 'rest_framework_swagger',\n 'rest_framework',\n '{{project_name}}.apps.base',\n 'django_celery_beat',\n)\n\n# =============================================================================\n# Logging\n# =============================================================================\nLOG_DIR = E.LOG_DIR or os.path.join(VAR_ROOT, 'log')\nos.makedirs(LOG_DIR, mode=0o777, exist_ok=True)\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'dev_friendly': {\n 'format': '[%(levelname)7s] %(asctime)s %(name)20s %(lineno)3d | %(message)s' # noqa\n },\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n # noqa\n },\n },\n 'handlers': {\n 'request_handler': {\n 'level': 'INFO',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOG_DIR, 'request.log'),\n 'maxBytes': 1024 * 1024 * 5,\n 'backupCount': 5,\n 'formatter': 'verbose',\n },\n 'base_handler': {\n 'level': 'INFO',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOG_DIR, 'django.log'),\n 'maxBytes': 1024 * 1024 * 5,\n 'backupCount': 5,\n 'formatter': 'verbose',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'dev_friendly'\n },\n 'commands': {\n 'level': 'INFO',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOG_DIR, 'commands.log'),\n 'maxBytes': 1024 * 1024 * 5,\n 'backupCount': 5,\n 'formatter': 'verbose',\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['console', 'request_handler'],\n 'level': 'DEBUG',\n },\n },\n 'root': {\n 'level': 'INFO',\n 'handlers': ['console', 'base_handler'],\n },\n}\n\n# =============================================================================\n# Celery\n# =============================================================================\n\nBROKER_URL = E.CELERY_URL or 'redis://localhost:6379/1'\nCELERY_RESULT_BACKEND = BROKER_URL or 'redis://localhost:6379/1'\nCELERYD_MAX_TASKS_PER_CHILD = E.CELERYD_MAX_TASKS_PER_CHILD or 1\nBROKER_POOL_LIMIT = E.CELERY_POOL_LIMIT or 0\n\n\n# =============================================================================\n# CHANNELS\n# =============================================================================\n\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"asgi_redis.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [(os.environ.get('REDIS_HOST', 'localhost'), 6379)],\n },\n \"ROUTING\": \"{{project_name}}.apps.base.routing.channel_routing\",\n },\n}\n\n# 
=============================================================================\n# REST_FRAMEWORK\n# =============================================================================\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticatedOrReadOnly',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.BasicAuthentication',\n 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n}\n\nJWT_AUTH = {\n # 'JWT_EXPIRATION_DELTA': datetime.timedelta(minutes=5),\n 'JWT_ALLOW_REFRESH': True,\n}\n","sub_path":"project_name/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"342598545","text":"\n#from django.contrib import admin\nfrom django.urls import path, include\n#from django.conf.urls.static import static\n#from django.conf import settings\nfrom .views import *\n\nurlpatterns = [\n    path('buscar', buscar_publicacao),\n    path('chart', buscar_grafico),\n    path('chartpizza', buscar_grafico_pizza),\n    path('filtro_producoes_grande_area', buscar_publicacoes_por_grande_area),\n    path('filtro_producoes_area', buscar_publicacoes_por_area_conhecimento),\n    #path(\"docentes_em_varias_areas\", mostrarProfQueAtuamEmDuasAreas),\n    path(\"informacoes_docentes\",buscar_informacoes_pesquisador),\n    path(\"artigo_qualis\", buscar_artigos_com_qualis),\n    path('areas',buscar_areas_por_grande_area),\n    path('salvar_pesquisadores',salvar_pesquisadores),\n    path('buscar_grupos_de_pesquisa',buscar_grupos_de_pesquisa),\n    path('buscar_informacoes_grupo',buscar_informacoes_grupo),\n    path('buscar_data_atualizacao',buscar_data_atualizacao),\n    path('verificar_situacao_do_grupo',verificar_situacao_do_grupo),\n    path('atualizar_matriz_com_recomendacoes',atualizar_matriz_com_recomendacoes),\n    path('buscar_artigos_do_email',buscar_artigos_do_email),\n    path('desinscrever_pesquisador_do_recebimento_de_emails',desinscrever_pesquisador_do_recebimento_de_emails),\n    path('gerar_filtros_da_pagina_busca',gerar_filtros_da_pagina_busca), # used to fetch the filters for the search page\n    path('todos_os_pesquisadores',todos_os_pesquisadores),\n    path('buscar_vertices_e_arestas',buscar_vertices_e_arestas),\n    path('estatisticas_da_pagina_principal',estatisticas_da_pagina_principal),\n    path('buscar_patentes_aleatorias', buscar_patentes_aleatorias),\n    path('receber_email',receber_email),\n    path('gerar_publicacoes_pesquisadores',gerar_publicacoes_pesquisadores)\n]","sub_path":"src/article/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"627199528","text":"from dist_train_utils import load_batch\nfrom keras.utils import Sequence\nfrom tqdm import tqdm\nimport numpy as np\n\n# Here, `x_list` is a list of paths to the protein-ligand pairs\n# and `y_list` contains the associated classes.\n\nclass TrainSequence(Sequence):\n\n    def __init__(self, x_list, y_list, batch_size):\n        self.x, self.y = x_list, y_list\n        self.batch_size = batch_size\n\n    def __len__(self):\n        return int(np.ceil(len(self.x) / float(self.batch_size)))\n\n    def __getitem__(self, idx):\n        start = idx * self.batch_size\n        end = (idx + 1) * self.batch_size\n        batch_x = self.x[start:end] # batch of protein, ligand filepath pairs\n        batch_y = self.y[start:end]\n        x = load_batch(batch_x)\n        print(\"Check {}\".format(x.shape))\n        return x, batch_y","sub_path":"dist_train_sequence.py","file_name":"dist_train_sequence.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"256638678","text":"#!/usr/bin/env pipenv-shebang\n\nimport os\nimport sys\nimport argparse\n\nfrom datetime import datetime\n\nimport edit_entry\nimport imdbutils\nimport listings\nimport stats\nimport data\n\nfrom entry import Entry\n\ndef do_add(args):\n \"\"\"CLI func to call when doing subtask \"add\". \"\"\"\n # Three kinds of cases here in a mess:\n # 1. movie name provided (possibly other info) (go batch)\n # 2. movie name not provided, IMDB provided (go batch)\n # 3. neither is provided, go interactive\n # TODO refactoring needed. Replace functions and so on.\n db = data.DataFacilities(dbfile=args.db_file)\n if args.movie:\n # 1. batch\n newflick = Entry(args.movie,\n args.rating,\n args.imdb,\n args.message or \"\")\n newflick.imdb = imdbutils.just_work_dammit(newflick,\n skip=args.skip_imdb)\n elif args.imdb:\n # 2. batch: No movie name given but something of an IMDB id\n if args.skip_imdb:\n print(\"Can't do this without querying IMDB!\")\n return\n try:\n moviename = imdbutils.query_imdb_name(args.imdb)\n clean_id = imdbutils.clean_imdb_id(args.imdb)\n except imdbutils.BadIMDBIdException:\n print(\"Malformed IMDB id, aborting.\")\n return\n except imdbutils.NoIMDBpyException:\n print(\"Can't do this without querying IMDB! (No IMDBpy)\")\n return\n newflick = Entry(moviename, imdb=clean_id, rating=args.rating,\n message=args.message or \"\")\n else:\n # 3. interactive\n try:\n newflick = edit_entry.edit_data_interactive(None,\n skip_imdb=args.skip_imdb)\n if not args.skip_imdb:\n from textwrap import fill\n #triv = imdbutils.query_random_trivia(newflick.imdb)\n #if triv: print 'TRIVIA:',fill(triv )\n except edit_entry.UserCancel:\n print(\"Empty name, exiting...\")\n return 0\n if args.debug:\n print(newflick.__dict__)\n else:\n db.store_entry(newflick)\n\ndef do_edit(args):\n \"\"\"Handle subtask \"edit\".\"\"\"\n db = data.DataFacilities(dbfile=args.db_file)\n\n try:\n lastentry = db.get_last()\n except data.EmptyDBException:\n print(\"Nothing in the DB!\")\n return\n\n if args.delete:\n if args.debug:\n print(\"Would delete:\", vars(lastentry))\n return\n else:\n db.delete_entry()\n return\n # at this point, only edit the last one\n edited = edit_entry.edit_data_interactive(lastentry,\n skip_imdb=args.skip_imdb)\n db.set_entry(edited)\n\ndef _create_and_parse_args(argv):\n \"\"\"Create the arg parser and do the magic.\"\"\"\n psr = argparse.ArgumentParser(description=\"A movie diary.\")\n subparser = psr.add_subparsers(\n title=\"Subtasks\",\n help=None)\n\n psr.add_argument('-I', '--skip-imdb', action='store_true',\n help=\"Don't query IMDB\")\n psr.add_argument('-f', '--db-file', action='store',\n help='Specify a database other than default.')\n psr.add_argument('-C', '--nocolor', action='store_true',\n help=\"Disable color output.\")\n psr.add_argument('-D', '--debug', action='store_true',\n help=\"Debug and dry-run.\")\n\n addparser = subparser.add_parser('add',\n help=\"Add new entry\")\n addparser.add_argument(\"movie\",\n nargs='?',\n help=\"Movie name\")\n addparser.add_argument(\"-r\", \"--rating\", type=float,\n help=\"How did you like the movie?\")\n addparser.add_argument(\"-i\", \"--imdb\",\n help=\"IMDB id or URL\")\n addparser.add_argument(\"-m\", \"--message\",\n help=\"A few words about the experience.\")\n addparser.set_defaults(func=do_add)\n\n listparser = subparser.add_parser('list',\n help='List entries')\n listparser.add_argument(\"format\", nargs='?',\n help='Select the format style.',\n choices=[\"compact\", \"csv\", \"full\", \"org\"])\n 
listparser.add_argument('-E', '--exp', action='store_true',\n        help='Exponential graph')\n    listparser.add_argument('-t', '--title', action='store',\n        help='Grep titles.')\n    listparser.add_argument('-m', '--message', action='store',\n        help='Grep messages.')\n    listparser.add_argument('--ge', type=float,\n        help='Show films with rating greater than or equal to')\n    listparser.add_argument('--gt', type=float,\n        help='Show films with rating greater than')\n    listparser.add_argument('--le', type=float,\n        help='Show films with rating less than or equal to')\n    listparser.add_argument('--lt', type=float,\n        help='Show films with rating less than')\n\n    def isodate(s):\n        \"\"\"Try and parse ISO date into a datetime. Raise ArgumentTypeError for\n        argparse to handle in case of bad data.\"\"\"\n        try:\n            return datetime.strptime(s, \"%Y-%m-%d\")\n        except ValueError:\n            raise argparse.ArgumentTypeError(\"Invalid date.\")\n\n    listparser.add_argument('-b', '--begin', type=isodate,\n        help='Show entries stored after the date [YYYY-MM-DD] inclusive.')\n    listparser.add_argument('-e', '--end', type=isodate,\n        help='Show entries stored before the date [YYYY-MM-DD] exclusive.')\n\n    listparser.add_argument('-S', '--sort-name', action='store_true',\n        help='Sort results by name.')\n    listparser.add_argument('-R', '--sort-rating', action='store_true',\n        help='Sort results by rating.')\n    listparser.add_argument('-A', '--asc', action='store_true',\n        help='Sort ascending')\n    listparser.add_argument('-D', '--desc', action='store_true',\n        help='Sort descending')\n    listparser.add_argument('-F', '--list-format', action='store',\n        help='Format used for compact listings. Supported variables are: '\n        + ', '.join(['{' + a + '}'\n                     for a in listings.LIST_FORMAT_VARS]))\n    listparser.set_defaults(format='compact', func=listings.do_list)\n\n    # edit section will be limited to the last one for the time being.\n    editparser = subparser.add_parser('edit',\n        help='Edit entries')\n    editparser.add_argument(\"-d\", \"--delete\", action=\"store_true\",\n        help='Delete last entry')\n    editparser.set_defaults(func=do_edit)\n\n    # statistics\n    statparser = subparser.add_parser('stats',\n        help='See statistics.')\n    statparser.add_argument('-d', '--days', type=int,\n        help='Observe only last d days.')\n    statparser.add_argument('-H', '--html', action=\"store_true\",\n        help='Output HTML')\n    statparser.add_argument('-o', '--open', action='store_true',\n        help='Open result instantly in a browser.')\n    statparser.set_defaults(func=stats.activity_calendar)\n\n    # Diagnose about IMDB query support\n    try:\n        imdbutils.get_imdb_server()\n        support = 'YES'\n    except imdbutils.NoIMDBpyException:\n        support = 'NO'\n    psr.epilog = f'IMDB support = {support}'\n\n    if len(argv) == 0: psr.parse_args(['-h'])\n    args = psr.parse_args(argv)\n\n    # check for environ variables and set if no option given\n    if not args.db_file: args.db_file = os.environ.get('MOARY_MOVIEDB', '')\n\n    return args\n\ndef main(argv):\n    \"\"\" Inspect arguments and dispatch to subtasks. \"\"\"\n    args = _create_and_parse_args(argv)\n    args.func(args)\n\n\nif __name__ == '__main__':\n    \"\"\"parse args and direct execution towards the right func.\"\"\"\n    main(sys.argv[1:])\n","sub_path":"moary.py","file_name":"moary.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"645590229","text":"from artmuc.models import User, Post, db\nfrom artmuc.forms import RegistrationForm, LoginForm, UpdateAccountForm, UpdateCustomerAccountForm , PostForm\nfrom artmuc.mail import Mailer\nfrom flask import render_template, request, redirect, session, url_for, flash, jsonify\nfrom artmuc import app\nfrom artmuc.evalid import validate_product_price, validate_artist_key\nfrom flask_login import login_user, current_user, logout_user, login_required\nimport itsdangerous\nimport hashlib\nimport time\nfrom PIL import Image\nimport os\nimport json\n\n#Payment Gateway\nimport stripe\n\nfrom wtforms.validators import ValidationError\n\nstripe.api_key='sk_test_KoGkq0vUMMXep9fYOrOGWwR700oUIcU9JD'\nstripe_pub = 'pk_test_20KQ2Vis1VY9ep6JnWeQxssn00NkW1agQW'\n\n@app.route('/')\ndef home():\n return render_template('home.html', title='Home')\n\ndef load_accounts():\n accounts = User.query.all()\n return accounts\n\n@app.route('/profiles')\ndef profiles():\n accounts = load_accounts()\n return render_template('profiles.html', accounts = accounts)\n\n@app.route('/registration', methods=['GET', 'POST'])\ndef registration():\n if current_user.is_authenticated:\n return redirect('/')\n form = RegistrationForm()\n if form.validate_on_submit():\n if form.artist_key.data:\n if validate_artist_key(form.artist_key.data) == False:\n flash(f'Artist Key Incorrect, Please Continue As A Customer Or Double Check', 'danger')\n return redirect('/registration')\n sha = hashlib.sha384()\n sha.update(form.password.data.encode('utf-8'))\n hashed_password = str(sha.hexdigest())\n user = User(username=form.username.data, email=form.email.data, artist_key=form.artist_key.data, password=hashed_password, verified=\"0\")\n db.session.add(user)\n db.session.commit()\n Mailer(form.email.data, \"Validate\", token=itsdangerous.url_safe.URLSafeTimedSerializer('checkthoseemails').dumps(form.email.data))\n flash(f'Your Account Has Been Created! Please confirm your email! Double check your email and spam for confirmation!', 'success')\n return redirect(url_for('home'))\n return render_template('register.html', title='Registration', form=form)\n\n@app.route('/email_validation/')\ndef email_validation(token):\n try:\n email = itsdangerous.url_safe.URLSafeTimedSerializer('checkthoseemails').loads(token, max_age=600)\n user = User.query.filter_by(email=email).first()\n user.verified = \"1\"\n #db.session.add(User(verified=\"1\"))\n db.session.commit()\n return redirect(url_for('login'))\n except itsdangerous.exc.BadTimeSignature:\n return render_template('tokeninvalid.html')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect('/')\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n encoded_password = str(form.password.data).encode('utf-8')\n sha = hashlib.sha384()\n sha.update(encoded_password)\n hashed_password = str(sha.hexdigest())\n # User Validation\n if user and hashed_password == user.password:\n if user.verified == '1':\n login_user(user)\n return redirect('/account')\n Mailer(form.email.data, \"Validate\", token=itsdangerous.url_safe.URLSafeTimedSerializer('checkthoseemails').dumps(form.email.data))\n flash('Account unverified, email sent. Check your spam!')\n else:\n flash('Login Failed. 
\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect('/')\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n encoded_password = str(form.password.data).encode('utf-8')\n sha = hashlib.sha384()\n sha.update(encoded_password)\n hashed_password = str(sha.hexdigest())\n # User Validation\n if user and hashed_password == user.password:\n if user.verified == '1':\n login_user(user)\n return redirect('/account')\n Mailer(form.email.data, \"Validate\", token=itsdangerous.url_safe.URLSafeTimedSerializer('checkthoseemails').dumps(form.email.data))\n flash('Account unverified, email sent. Check your spam!')\n else:\n flash('Login Failed. Please Check Email And Password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect('/')\n\ndef save_picture(form_picture):\n sha = hashlib.sha256()\n f_ext = os.path.splitext(form_picture.filename)[1]\n sha.update((str(time.time()) + form_picture.filename).encode('utf-8'))\n generated_filename = str(sha.hexdigest()) + str(f_ext)\n picture_path = os.path.join(app.root_path, 'static/media/profile_pics', generated_filename)\n output_size = (250, 250)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n i.save(picture_path)\n return generated_filename\n\n@app.route('/delete/<product_picture_path>', methods=['GET'])\n@login_required\ndef delete_product(product_picture_path):\n products = Post.query.all()\n owner = ''\n target = None\n for product in products:\n if product.product_picture_path == str(product_picture_path):\n owner = product.author.username\n target = product\n if target is None:\n flash('Product Not Found', 'danger')\n return redirect('/account')\n # check ownership before touching the file on disk\n if current_user.username != owner:\n flash('You Are Not Allowed To Change Data For This User [Error \"validation\"]', 'danger')\n return redirect('/account')\n if target.is_sold != 'Yes':\n picture_path = os.path.join(app.root_path, 'static/media/products', target.product_picture_path)\n os.remove(picture_path)\n else:\n flash('You Are Not Allowed To Delete A Product That Has Been Sold', 'danger')\n return redirect('/account')\n db.session.delete(target)\n db.session.commit()\n return redirect('/')\n\n@app.route('/account', methods=['GET', 'POST'])\n@login_required\ndef account():\n if current_user.artist_key:\n form = UpdateAccountForm()\n if form.validate_on_submit():\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n current_user.profile_picture_path = picture_file\n current_user.username = form.username.data\n current_user.email = form.email.data\n current_user.biography = form.biography.data\n db.session.commit()\n flash('Your Account Has Been Updated', 'success')\n return redirect('/account')\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n form.biography.data = current_user.biography\n profile_picture_path = url_for('static', filename='media/profile_pics/' + current_user.profile_picture_path)\n products = Post.query.all()\n return render_template('account.html', title='account',\n profile_picture_path=profile_picture_path, form=form, products=products)\n else:\n form = UpdateCustomerAccountForm()\n if form.validate_on_submit():\n current_user.username = form.username.data\n current_user.country = form.country.data\n current_user.address = form.address.data\n current_user.region = form.region.data\n current_user.zip = form.zip.data\n db.session.commit()\n flash('Your Account Has Been Updated', 'success')\n return redirect('/account')\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.country.data = current_user.country\n form.address.data = current_user.address\n form.region.data = current_user.region\n form.zip.data = current_user.zip\n profile_picture_path = url_for('static', filename='media/profile_pics/' + current_user.profile_picture_path)\n products = Post.query.all()\n return render_template('account.html', title='account',\n profile_picture_path=profile_picture_path, form=form, products=products)\n
def save_product_picture(form_picture):\n sha = hashlib.sha256()\n f_ext = os.path.splitext(form_picture.filename)[1]\n sha.update((str(time.time()) + form_picture.filename).encode('utf-8'))\n generated_filename = str(sha.hexdigest()) + str(f_ext)\n picture_path = os.path.join(app.root_path, 'static/media/products', generated_filename)\n output_size = (500, 500)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n i.save(picture_path)\n return generated_filename\n\n@app.route('/post/new', methods=['GET', 'POST'])\n@login_required\ndef new_post():\n product_price_errors = []\n form = PostForm()\n if form.validate_on_submit():\n product_price_errors = validate_product_price(form.product_price.data)\n if form.picture.data and not product_price_errors:\n picture_file = save_product_picture(form.picture.data)\n post = Post(product_name = form.product_name.data, product_description = form.product_description.data, product_picture_path = picture_file, product_price = form.product_price.data, user_id = current_user.id, is_sold = 'No')\n db.session.add(post)\n db.session.commit()\n return redirect('/')\n return render_template('newpost.html', title='Add A Product', form=form, product_price_errors=product_price_errors)\n\n@app.route('/gallery/<artist>', methods=['GET'])\ndef visit_artist(artist):\n artist_data = User.query.filter_by(username=artist).first()\n products = Post.query.all()\n return render_template('gallery.html', title='Gallery', author=artist, products=products, artist_data=artist_data, key=stripe_pub)\n\n\ndef get_product(product_id):\n products = Post.query.all()\n for product in products:\n if product.id == product_id:\n return product\n return False\n\n@app.route('/product/<id>', methods=['GET'])\n@login_required\ndef view_product(id):\n product = get_product(int(id))\n return render_template('product.html', product=product, key=stripe_pub)\n\n@app.route('/charge', methods=['POST', 'GET'])\n@login_required\ndef charge():\n response = jsonify('error')\n response.status_code = 500\n product = get_product(int(request.json['product']))\n if not product:\n return response\n\n purchased_product_data = {\n 'product_id' : product.id,\n 'product_name' : product.product_name,\n 'product_description' : product.product_description,\n 'product_price' : product.product_price,\n 'product_author_username' : product.author.username,\n 'product_author_email' : product.author.email\n }\n\n try:\n customer = stripe.Customer.create(\n email=current_user.email,\n source=request.json['token']\n )\n\n # This Works Fine // Charges Card And Pays To Stripe Account\n stripe.Charge.create(\n customer=customer.id,\n amount=int(product.product_price * 100),\n currency='eur',\n application_fee_amount=int(product.product_price * 100 * 0.05), # 5% platform fee\n transfer_data={\n 'destination': product.author.stripe_account_id\n }\n )\n response = jsonify('success')\n response.status_code = 202\n product.is_sold = 'Yes'\n\n ###############################\n # DEBUG CAREFULLY #\n ###############################\n product.customer_email = current_user.email\n product.customer_country = current_user.country\n product.customer_region = current_user.region\n product.customer_address = current_user.address\n product.customer_zip = current_user.zip\n db.session.commit()\n # SEND PAYMENT CONFIRMATION MAILS HERE\n\n except stripe.error.StripeError:\n return response\n return response\n
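\n# Sketch of the destination charge used above, with made-up amounts (Stripe\n# amounts are in the smallest currency unit, so 2000 means EUR 20.00; the\n# connected account id is a placeholder):\n#   stripe.Charge.create(\n#       customer=customer.id,\n#       amount=2000,\n#       currency='eur',\n#       application_fee_amount=100,  # the 5% platform fee\n#       transfer_data={'destination': 'acct_XXXXXXXX'},\n#   )\n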
\n@app.route('/stripe_checkout', methods=['GET', 'POST'])\n@login_required\ndef stripe_checkout():\n auth_token = request.args.get('code')\n try:\n response = stripe.OAuth.token(\n grant_type='authorization_code',\n code=auth_token\n )\n current_user.stripe_account_id = response['stripe_user_id']\n db.session.commit()\n flash(\"Your Payment Method Has Been Added, You Can Now Receive Payments!\", 'success')\n except Exception:\n flash('Checkout Failed, Payment Method Not Active!', 'danger')\n\n return redirect('/account')\n\n@app.route('/balances', methods=['GET'])\n@login_required\ndef balances():\n if not current_user.artist_key:\n flash(\"You Don't Have Permission To View Artist-Panels\", 'danger')\n return redirect('/account')\n stripe_balance = stripe.Balance.retrieve(stripe_account=current_user.stripe_account_id)\n pending_balance = stripe_balance['pending'][0]['amount']\n available_balance = stripe_balance['available'][0]['amount']\n stripe_balance_data = {'available' : available_balance, 'pending' : pending_balance}\n products = Post.query.all()\n return render_template('balances.html', artist_data=current_user, products=products, stripe_balance_data = stripe_balance_data)\n\n#id = db.Column(db.Integer, primary_key=True)\n#product_name = db.Column(db.String(25), nullable=False)\n#product_description = db.Column(db.String(250), nullable=False)\n#product_picture_path = db.Column(db.String(260), nullable=False)\n#product_price = db.Column(db.Float, nullable=False)\n#user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n#is_sold = db.Column(db.Integer, nullable=False)\n","sub_path":"artmuc/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":13066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"507800514","text":"# -*- coding: utf-8 -*-\nfrom django.urls import path, include\nfrom rest_framework import routers\n\nfrom . import api\nfrom . import views\n\n\napp_name = 'pyfb-endpoint'\n\nrouter = routers.DefaultRouter()\nrouter.register(r'customerendpoint', api.CustomerEndpointViewSet)\nrouter.register(r'codec', api.CodecViewSet)\nrouter.register(r'providerendpoint', api.ProviderEndpointViewSet)\n\n\nurlpatterns = (\n # urls for Django Rest Framework API\n path('api/v1/', include(router.urls)),\n)\n\nurlpatterns += (\n # urls for CustomerEndpoint\n path('pyfb_endpoint/customerendpoint/', views.CustomerEndpointListView.as_view(), name='pyfb_endpoint_customerendpoint_list'),\n path('pyfb_endpoint/customerendpoint/create/', views.CustomerEndpointCreateView.as_view(), name='pyfb_endpoint_customerendpoint_create'),\n path('pyfb_endpoint/customerendpoint/detail//', views.CustomerEndpointDetailView.as_view(), name='pyfb_endpoint_customerendpoint_detail'),\n path('pyfb_endpoint/customerendpoint/update//', views.CustomerEndpointUpdateView.as_view(), name='pyfb_endpoint_customerendpoint_update'),\n)\n\nurlpatterns += (\n # urls for Codec\n path('pyfb_endpoint/codec/', views.CodecListView.as_view(), name='pyfb_endpoint_codec_list'),\n path('pyfb_endpoint/codec/create/', views.CodecCreateView.as_view(), name='pyfb_endpoint_codec_create'),\n path('pyfb_endpoint/codec/detail//', views.CodecDetailView.as_view(), name='pyfb_endpoint_codec_detail'),\n path('pyfb_endpoint/codec/update//', views.CodecUpdateView.as_view(), name='pyfb_endpoint_codec_update'),\n)\n\nurlpatterns += (\n # urls for ProviderEndpoint\n path('pyfb_endpoint/providerendpoint/', views.ProviderEndpointListView.as_view(), name='pyfb_endpoint_providerendpoint_list'),\n path('pyfb_endpoint/providerendpoint/create/', views.ProviderEndpointCreateView.as_view(), name='pyfb_endpoint_providerendpoint_create'),\n path('pyfb_endpoint/providerendpoint/detail//', views.ProviderEndpointDetailView.as_view(), name='pyfb_endpoint_providerendpoint_detail'),\n path('pyfb_endpoint/providerendpoint/update//', views.ProviderEndpointUpdateView.as_view(), name='pyfb_endpoint_providerendpoint_update'),\n)\n","sub_path":"pyfb_endpoint/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"312753719","text":"# Heuristica = quantRainhas em conflito - 1\n\nclass NRainhas:\n def __init__(self, tamanho):\n self.tamanho = tamanho\n self.colunas = []\n \n def inserirNaProxColuna(self, coluna):\n self.colunas.append(coluna)\n \n def removerDaColunaAtual(self):\n return self.colunas.pop()\n \n def verificaProximaColuna(self, coluna, cont):\n linha = len(self.colunas)\n # verifica coluna\n for rainha in self.colunas:\n if coluna == rainha:\n cont += 1\n return cont, False\n\n # verifica diagonal\n for linhaDaRainha, colunaDaRainha in enumerate(self.colunas):\n if colunaDaRainha - linhaDaRainha == coluna - linha:\n cont += 1\n return cont, False\n \n # verifica outra diagonal\n for linhaDaRainha, colunaDaRainha in enumerate(self.colunas):\n if ((self.tamanho - colunaDaRainha) - linhaDaRainha == (self.tamanho - coluna) - linha):\n cont += 1\n return cont, False\n return cont, True\n \n def printarTabuleiro(self):\n print(\"\\n Solução -> \" + str(self.colunas) + \"\\n\\n\")\n \ndef solucaoComBuscaLocal(tamanho):\n tabuleiro = NRainhas(tamanho)\n linha = 0\n coluna = 0\n cont = 1\n solucaoEncontrada = False\n while True:\n # inserir rainha na prox linha\n while coluna < tamanho:\n heuristica, verificador = tabuleiro.verificaProximaColuna(coluna, cont)\n if verificador:\n tabuleiro.inserirNaProxColuna(coluna)\n linha += 1\n coluna = 0\n heuristica -= 1\n break\n else:\n heuristica += 1\n coluna += 1\n \n # se não temos uma coluna para inserir, ou se o tabuleiro está cheio\n if (coluna == tamanho or linha == tamanho):\n # se o tabuleiro está cheio, e heuristica 0, então temos uma solução\n if linha == tamanho and heuristica == 0:\n tabuleiro.printarTabuleiro()\n tabuleiro.removerDaColunaAtual()\n linha -= 1\n solucaoEncontrada = True\n try:\n if solucaoEncontrada:\n break\n else:\n colunaAnterior = tabuleiro.removerDaColunaAtual()\n except IndexError: #error handler - Index de vetor invalido\n break\n # tentar linha anterior e coluna novamente\n linha -= 1\n coluna = 1 + colunaAnterior\n\n \n \nn = int(input('Tamanho do tabuleiro (n): '))\nsolucaoComBuscaLocal(n)","sub_path":"4RainhasBuscaLocal.py","file_name":"4RainhasBuscaLocal.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"309538506","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport json\ntry:\n from urllib import request as urllib2\nexcept ImportError:\n import urllib2\nimport time\nimport logging\n\n\n# Convet time to timestamp. ex: 2016-5-6 4:28:54 -> 1462480134\ntime2timestamp = lambda t: int(time.mktime(time.strptime(t, \"%Y-%m-%d %H:%M:%S\")))\n\n# Create log\nlogging.basicConfig(filename='/tmp/zabbix_maintenance' + time.strftime('%Y%m%d',time.localtime()) + '.log', format='[%(asctime)s] %(levelname)s: %(message)s', level=logging.INFO)\n\nclass Maintenance(object):\n \"\"\"\n Provide create,delete,get,update function for maintenance\n \"\"\"\n\n def __init__(self, url, user, password):\n \"\"\"\n @param:\n url: string type\n param user: string type\n param password: string type\n\n @ret:\n None\n \"\"\"\n self.url = url + '/api_jsonrpc.php'\n self.user = user\n self.password = password\n self.auth = self._get_token(self.url, self.user, self.password)\n\n def _get_token(self, url, user, password):\n \"\"\"\n @fuction:\n Get the login token\n\n @param:\n url: string type\n param user: string type\n param password: string type\n\n @ret:\n token: string type\n \"\"\"\n url = url\n user = user\n password = password\n header = {'Content-Type': 'application/json'}\n req_json = {\n 'jsonrpc': '2.0',\n 'method': 'user.login',\n 'params': {\n 'user': user,\n 'password': password\n },\n 'id': 1\n }\n\n v = json.dumps(req_json).encode('utf-8')\n req = urllib2.Request(url, headers=header, data=v)\n\n try:\n res = urllib2.urlopen(req)\n except Exception as e:\n # print('Please check your name and password: ' + str(e))\n logging.error('Please check your name and password: ' + str(e))\n else:\n response = res.read()\n page = response.decode('utf-8')\n page = json.loads(page)\n res.close()\n token = page.get('result')\n\n return token\n\n def call_api(self, method, params=None):\n \"\"\"\n @function:\n Call the zabbix api\n\n @param:\n method: string type, the value is the name of related zabbix api\n params: type depending on zabbix api\n\n @ret:\n result: list or dict type, if call zabbix api error occurs, then type is dict else is list\n \"\"\"\n header = {'Content-Type': 'application/json'}\n req_json = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'auth': self.auth,\n 'id': 2\n }\n v = json.dumps(req_json).encode('utf-8')\n # print('url: %s, headers: %s, data: %s' %(self.url, header, req_json))\n logging.info('url: %s, headers: %s, data: %s' %(self.url, header, req_json))\n req = urllib2.Request(self.url, headers=header, data=v)\n try:\n res = urllib2.urlopen(req)\n except Exception as e:\n # print('Check error info:' + str(e))\n logging.error('Check error info:' + str(e))\n else:\n response = res.read()\n # print('response: %s' %response)\n logging.error('response: %s' %response)\n page = response.decode('utf-8')\n page = json.loads(page)\n res.close()\n result = page.get('result') or page.get('error')\n\n return result\n\n def create(self, hostids, name, since, till):\n \"\"\"\n @function:\n Create maintenance window\n\n @param:\n hostids: list type and element type is int, the hosts need to be maintenanced\n name: string type, is the name of maintenance window\n since: int(the seconds since 1970), maintenance window start time\n till: int (the seconds since 1970), maintenance window end time\n\n @ret:\n ret: list or dict type, if call zabbix api error occurs, then type is dict else is list\n \"\"\"\n start = int(since) + 3\n period = int(till) - int(since)\n 
\n def create(self, hostids, name, since, till):\n \"\"\"\n @function:\n Create maintenance window\n\n @param:\n hostids: list type and element type is int, the hosts to be put in maintenance\n name: string type, is the name of maintenance window\n since: int (the seconds since 1970), maintenance window start time\n till: int (the seconds since 1970), maintenance window end time\n\n @ret:\n ret: list or dict type, if a zabbix api error occurs, then type is dict else is list\n \"\"\"\n start = int(since) + 3\n period = int(till) - int(since)\n c_params = {\n 'name': name,\n 'active_since': since,\n 'active_till': till,\n 'description': name,\n 'maintenance_type': 0,\n 'groupids': [\n ],\n 'hostids': hostids or [],\n 'timeperiods': [\n {\n 'timeperiod_type': 0,\n 'start_date': start,\n 'period': period\n }\n ]\n }\n\n # call once and unwrap; the old `x.get(...) or self.call_api(...)` pattern\n # re-issued the request a second time whenever the first call failed\n ret = self.call_api('maintenance.create', c_params)\n if isinstance(ret, dict):\n ret = ret.get('maintenanceids', ret)\n return ret\n\n def delete(self, maintenanceids = []):\n \"\"\"\n @function:\n Delete maintenance window\n \n @param:\n maintenanceids: list type and element type is string, the maintenanceids to be deleted\n \n @ret:\n ret: list or dict type, if a zabbix api error occurs, then type is dict else is list\n \"\"\"\n d_params = maintenanceids\n ret = self.call_api('maintenance.delete', d_params)\n if isinstance(ret, dict):\n ret = ret.get('maintenanceids', ret)\n return ret\n\n def get(self, maintenanceids = []):\n \"\"\"\n @function:\n Get maintenance window info\n\n @param:\n maintenanceids: list type and element type is string,\n\n @ret:\n ret: list or dict type, if a zabbix api error occurs, then type is dict else is list\n \"\"\"\n g_params = {\n 'maintenanceids': maintenanceids,\n 'output': 'extend',\n 'selectGroups': 'extend',\n 'selectHosts': ['host', 'status'],\n 'selectTimeperiods': 'extend'\n }\n\n ret = self.call_api('maintenance.get', g_params)\n return ret\n\n def update(self, maintenanceid, hostids, since, till):\n \"\"\"\n @function:\n update maintenance window\n\n @param:\n maintenanceid: string type, the maintenanceid to be updated\n hostids: list type and element type is int, the hosts to be put in maintenance\n since: int (the seconds since 1970), maintenance window start time\n till: int (the seconds since 1970), maintenance window end time\n\n @ret:\n ret: list or dict type, if a zabbix api error occurs, then type is dict else is list\n \"\"\"\n start = int(since) + 3\n period = int(till) - int(since)\n u_params = {\n 'maintenanceid': str(maintenanceid),\n 'active_since': since,\n 'active_till': till,\n 'hostids': hostids,\n 'timeperiods': [\n {\n 'start_date': start,\n 'period': period\n }\n ]\n }\n\n ret = self.call_api('maintenance.update', u_params)\n if isinstance(ret, dict):\n ret = ret.get('maintenanceids', ret)\n return ret\n\n def get_hostids(self, host_dn_list):\n \"\"\"\n @function:\n Get hostids depending on the host domain name\n \n @param:\n host_dn_list: list type and element type is string\n \n @ret:\n hostids: list type and element type is int\n \"\"\"\n hostids = []\n host_params = {\n 'filter': {\n 'host': host_dn_list\n }\n }\n results_set = self.call_api('host.get', host_params)\n for item in results_set:\n hostids.append(item['hostid'])\n\n return hostids\n
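\n# Typical round trip (placeholder URL, credentials, host name and epoch\n# times; mirrors the commented-out block in __main__):\n#   mt = Maintenance('http://zabbix.example.com/zabbix', 'user', 'pass')\n#   ids = mt.get_hostids(['host1.example.com'])\n#   mtids = mt.create(ids, 'window-name', 1518105600, 1518192000)\n#   mt.delete(mtids)\n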
\ndef gen_domainname(ip_dict):\n \"\"\"\n Generate domain name list depending on the ip dict\n \"\"\"\n dn_prefix = 'zoom'\n dn_postfix = '.zoom.us'\n dns_list = []\n \n # if ip_dict.keys()[0] == 'mra':\n # return ['zoomssc52mra.zipow.com']\n \n for st in ip_dict.keys():\n for ip in ip_dict[st]:\n last_oct = ip.split('.')[3]\n oct_num = int(last_oct) # compare as int -- the string never matched the int lists below\n \n # -------------sc-------------\n # 162.255.37.0/25 & 162.255.37.128/25\n if ip.startswith('162.255.37.'):\n if oct_num in [67, 120]:\n dn = 'zoomsc' + last_oct + 'mmr.zoom.us'\n else:\n dn = dn_prefix + 'sc' + last_oct + st + dn_postfix\n # 192.204.12.0/24 – \"n\" - NTT\n elif ip.startswith('192.204.12.'):\n dn = dn_prefix + 'sc' + 'n' + last_oct + st + dn_postfix\n # 192.204.13.0/24 - \"m\" - NTT\n elif ip.startswith('192.204.13.'):\n dn = dn_prefix + 'sc' + 'm' + last_oct + st + dn_postfix\n # 204.141.28.0/24 - \"s\" - NTT\n elif ip.startswith('204.141.28.'):\n dn = dn_prefix + 'sc' + 's' + last_oct + st + dn_postfix\n # 204.141.29.0/24 - \"t\" - NTT\n elif ip.startswith('204.141.29.'):\n if oct_num in [51, 52, 53, 159, 160, 161]:\n dn = 'zoomsct' + last_oct + 'pc.zoom.us'\n else:\n dn = dn_prefix + 'sc' + 't' + last_oct + st + dn_postfix\n\n # -------------ny-------------\n # 162.255.36.0/25 & 162.255.36.128/25\n elif ip.startswith('162.255.36.'):\n if oct_num in [67, 120]:\n dn = 'zoomny' + last_oct + 'mmr.zoom.us'\n elif oct_num in [124, 125]:\n dn = 'zoomny' + last_oct + 'bcz.zoom.us'\n else:\n dn = dn_prefix + 'ny' + last_oct + st + dn_postfix\n # 192.204.14.0/24 - \"n\" - NTT\n elif ip.startswith('192.204.14.'):\n dn = dn_prefix + 'ny' + 'n' + last_oct + st + dn_postfix\n # 192.204.15.0/24 - \"m\" - NTT\n elif ip.startswith('192.204.15.'):\n if oct_num in [63, 171]:\n dn = 'zoomnym' + last_oct + 'pc.zoom.us'\n else:\n dn = dn_prefix + 'ny' + 'm' + last_oct + st + dn_postfix\n # 204.141.30.0/24 -\"s\" - NTT\n elif ip.startswith('204.141.30.'):\n dn = dn_prefix + 'ny' + 's' + last_oct + st + dn_postfix\n # 204.141.31.0/24 - \"t\" - NTT\n elif ip.startswith('204.141.31.'):\n if oct_num in [51, 52, 53, 159, 160, 161]:\n dn = 'zoomnyt' + last_oct + 'pc.zoom.us'\n else:\n dn = dn_prefix + 'ny' + 't' + last_oct + st + dn_postfix\n\n # -------------tj-------------\n # 221.122.88.128/25 & \n elif ip.startswith('221.122.88.'):\n if oct_num in [161, 162, 163, 164, 175, 176, 177, 178]:\n dn = dn_prefix + 'tj' + last_oct + 'bcs' + dn_postfix\n elif oct_num in [155, 156, 157, 158, 171, 172, 173, 174]:\n dn = dn_prefix + 'tj' + last_oct + 'crc' + dn_postfix\n else:\n dn = dn_prefix + 'tj' + last_oct + st + dn_postfix\n\n # 221.122.89.128/25\n elif ip.startswith('221.122.89.'):\n dn = dn_prefix + 'tj' + 'n' + last_oct + st + dn_postfix\n\n # -------------mb-------------\n # 115.114.131.0/26\n elif ip.startswith('115.114.131.'):\n if oct_num in [20, 21, 22, 23, 32, 33, 34, 35]:\n dn = 'zoommb' + last_oct + 'bcs.zoom.us'\n elif oct_num in [36, 37, 38, 39]:\n dn = 'zoommb' + last_oct + 'crc.zoom.us'\n elif oct_num in [5, 6, 41, 52, 53, 54, 55, 56, 57, 58, 59]:\n dn = ''\n else:\n dn = dn_prefix + 'mb' + last_oct + st + dn_postfix\n\n # -------------am-------------\n # 213.19.144.0/24\n elif ip.startswith('213.19.144'):\n if oct_num in [16, 17, 18, 19, 20, 21, 22, 23, 32, 33, 34, 35, 52, 53, 54, 55, 56, 57, 58, 59, 68, 69, 70, 71]:\n dn = 'zoomam' + last_oct + 'bcs.zoom.us'\n else:\n dn = dn_prefix + 'am' + last_oct + st + dn_postfix\n # 213.19.153.0/24\n elif ip.startswith('213.19.153'):\n dn = ''\n\n # -------------sy-------------\n # 202.177.207.128/27\n elif ip.startswith('202.177.207'):\n if oct_num in range(133, 139):\n dn = 'zoomsy' + last_oct + 'bcs.zoom.us'\n else:\n dn = dn_prefix + 'sy' + last_oct + st + dn_postfix\n # 202.177.213.96/27\n elif ip.startswith('202.177.213.'):\n if oct_num in range(101, 107):\n dn = 'zoomsy' + last_oct + 'bcs.zoom.us'\n else:\n dn = dn_prefix + 'sy' + last_oct + st + dn_postfix\n\n # -------------hk-------------\n # 209.9.211.0/24\n elif ip.startswith('209.9.211.'):\n if oct_num in [16, 17, 18, 19, 20, 21, 22, 23, 32, 33, 34, 35, 52, 53, 54, 55, 56, 57, 58, 59, 68, 69, 70, 71]:\n dn = 'zoomhk' + last_oct + 'bcs.zoom.us'\n else:\n dn = dn_prefix + 'hk' + last_oct + st + dn_postfix\n # 209.9.215.0/24\n elif ip.startswith('209.9.215.'):\n dn = ''\n\n # -------------sp-------------\n # 64.211.144.0/24\n elif ip.startswith('64.211.144.'):\n if oct_num in [3, 4, 5, 6, 7, 8, 30, 31, 32, 33, 34, 35, 133, 134, 135, 136, 137, 138, 161, 162, 163, 164, 165, 166]:\n dn = 'zoomsp' + last_oct + 'bcs.zoom.us'\n else:\n dn = dn_prefix + 'sp' + last_oct + st + dn_postfix\n\n # -------------tr-------------\n # 69.174.57.0/24\n elif ip.startswith('69.174.57.'):\n dn = dn_prefix + 'tr' + last_oct + st + dn_postfix\n\n # -------------dv-------------\n # 8.5.128.0/23\n elif ip.startswith('8.5.128.'):\n dn = dn_prefix + 'dv' + 'n' + last_oct + st + dn_postfix\n elif ip.startswith('8.5.129.'):\n dn = dn_prefix + 'dv' + 'm' + last_oct + st + dn_postfix\n # 162.255.38.0/24\n elif ip.startswith('162.255.38.'):\n dn = dn_prefix + 'dv' + last_oct + st + dn_postfix\n else:\n dn = ''\n\n if dn:\n dns_list.append(dn)\n \n return dns_list\n
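\n# Example (the special-cased mmr address above):\n# gen_domainname({'mmr': ['162.255.37.67']}) -> ['zoomsc67mmr.zoom.us']\n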
\n\ndef gen_hostids(ip_dict):\n mmmttt = Maintenance('http://zabbix.zipow.com/zabbix', 'jerry', 'jerry.wang')\n domainname_list = gen_domainname(ip_dict)\n hostids = mmmttt.get_hostids(domainname_list)\n\n return hostids\n \ndef create_maintenance_window(ip_dict, since, till):\n logging.info('Create maintenance window')\n logging.info('Init Maintenance instance')\n mmmttt = Maintenance('http://zabbix.zipow.com/zabbix', 'jerry', 'jerry.wang')\n \n domainname_list = gen_domainname(ip_dict)\n logging.info('domain name list: %s' %domainname_list)\n \n hostids = mmmttt.get_hostids(domainname_list)\n logging.info('maintenance host ids: %s' %hostids)\n \n mtids = mmmttt.create(hostids, 'jerry_' + time.strftime('%Y%m%d%H%M',time.localtime()), since, till)\n if isinstance(mtids, list):\n logging.info('maintenance window created successfully, and maintenance id is: %s' %mtids)\n elif isinstance(mtids, dict):\n logging.error('creating maintenance window failed, error info: %s' %mtids)\n \n\nif __name__ == '__main__':\n create_maintenance_window({'mra': ['165.254.88.52']}, 1518305600, 1518392000)\n #mt = Maintenance('http://zabbix.zipow.com/zabbix', 'jerry', 'jerry.wang')\n #hostids = mt.get_hostids(['zoomssc52mra.zipow.com', 'zoomssc54mra.zipow.com'])\n #mtids = mt.create(hostids, 'jerry', 1518105600, 1518192000)\n #print(mt.get(mtids))\n #time.sleep(30)\n #mt.update(maintenanceid=mtids[0], hostids=hostids, since=1518109200, till=1518195600)\n #time.sleep(30)\n #mt.delete(mtids)\n\n #dict = {'mmr': ['162.255.37.188', '192.204.12.188', '192.204.13.230', '204.141.28.201', '204.141.29.69', '192.204.14.188', '192.204.15.228', '204.141.30.193', '204.141.31.69', '221.122.88.135', '221.122.89.131', '8.5.128.33', '8.5.129.33'], \n # 'zc': ['162.255.36.176', '115.114.131.42', '162.255.38.176'],\n # 'rc': ['213.19.144.42', '202.177.207.155', '209.9.211.42', '64.211.144.158', '69.174.57.158'],\n # 'rg': ['213.19.153.33', '202.177.213.113']}\n #print(gen_domainname(dict))\n ","sub_path":"zabbix_maintenance.py","file_name":"zabbix_maintenance.py","file_ext":"py","file_size_in_byte":16683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"305561035","text":"# -*- coding:utf-8 -*-\n__author__ = 'qing.cai@horizon.ai'\n\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\nimport os\nchannel_layer = get_channel_layer()\nimport time\n\n\ndef to_ws_message(step, send_data, file_name):\n '''\n 发送给websocket数据\n :param step:\n :param send_data:\n :return:\n '''\n time.sleep(0.5)\n async_to_sync(channel_layer.group_send)(\n 'dag_'+str(os.path.basename(file_name).split('.py')[0]),\n {\n \"type\": \"send.message\",\n \"message\": {\"step\": \"%s\" % step, \"msg\": \"%s\" % send_data}\n }\n )\n\n","sub_path":"apps/utils/to_ws_data.py","file_name":"to_ws_data.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"283758970","text":"import datetime\n\nfrom django.db import models\n\n\nclass TodoList(models.Model):\n title = models.CharField(max_length = 250)\n done = models.BooleanField(default = False)\n author_ip = models.CharField(blank = True, max_length = 25)\n created_date = models.CharField(default = datetime.datetime.utcnow().strftime(\"%Y-%m-%d, %H:%M:%S\"),\n max_length = 50)\n done_date = models.CharField(blank = True, null = True, max_length = 50)\n\n def save(self, *args, **kwargs):\n\n if self.done is True and not self.done_date:\n self.done_date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d, %H:%M:%S\")\n\n if self.pk is not None:\n orig = TodoList.objects.get(pk = self.pk)\n if orig.done is True and self.done is False:\n self.done_date = None\n\n super(TodoList, self).save(*args, **kwargs)\n","sub_path":"Zadanie1/Zadanie1_cbv/Todo_list/todo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"154923759","text":"## 3. Сохранить статью \"лингвистика\" Википедии в файл в формате UTF-8. Программа должна читать этот файл и\n## заменять в нём все формы слова \"язык\" на соответствующие формы слова \"шашлык\".\n## То, что получится, она должна записывать в другой текстовый файл.\n## Заменяться должны только формы этого слова. Т. е. если Вам нужно заменить слово \"кит\" на слово \"кот\",\n## слово \"китовый\" на слово \"котовый\" заменяться не должно. При замене нужно пользоваться функцией re.sub.\n## Если слово было написано с большой буквы, то и после замены оно должно быть написано с большой буквы. \n\nimport re\n\ndef main():\n with open('Лингвистика.txt', 'r', encoding = 'utf-8') as f:\n text = f.read()\n lang = 'язык((?:а(?:ми?|х)?)|и|о(?:в|м)|у|е)?([\\s,.!\\?:\"\\(\\)\\'»])'\n Lang = 'Язык((?:а(?:ми?|х)?)|и|о(?:в|м)|у|е)?([\\s,.!\\?:\"\\(\\)\\'»])'\n new_text = re.sub(lang,'шашлык\\\\1\\\\2', text)\n new_text = re.sub(Lang,'Шашлык\\\\1\\\\2', new_text)\n with open('Новая лингвистика.txt', 'w', encoding = 'utf-8') as f:\n f.write(new_text)\n\nif __name__ == '__main__':\n main()\n","sub_path":"hw11/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"264380172","text":"# dont forget to consider when array has negative value.\n# otherwise can use two pointer to solve it.\n\n\n# Given an array of integers and an integer k, you need to find the total number of continuous subarrays whose sum equals to k.\n\n\n\nfrom collections import defaultdict\nclass Solution(object):\n def subarraySum(self, nums, k):\n dic, res, prefix = defaultdict(lambda: 0),0,[]\n for i in range(len(nums)):\n if len(prefix)==0: prefix.append(nums[i])\n else: prefix.append(prefix[i-1]+nums[i])\n if prefix[-1]==k: res+=1 # dont forget!!!!\n if prefix[-1] in dic: res += dic[prefix[-1]]\n dic[prefix[-1]+k]+=1\n return res\n\n\n","sub_path":"medium/mediumCode/Array/****SubArraySumEqualsK.py","file_name":"****SubArraySumEqualsK.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"232162912","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport numpy as np\nimport chainer\nfrom chainer import cuda\nfrom chainer import optimizers, serializers\nfrom tqdm import tqdm\nimport sys\nimport os\nfrom collections import Counter\nimport math\nimport pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport csv\nimport matplotlib.gridspec as gridspec\n\nimport time\nimport glob\nimport argparse\nimport shutil\nimport random\nfrom pathlib import Path\nfrom nmt_config import *\nfrom enc_dec_batch import *\n\n\nclass Translator(object):\n\n def __init__(self, args):\n self.exp_name = args.exp if 'exp' in args else \"undefined\"\n self.epochs_num = args.epochs if 'epochs' in args else \"undefined\"\n\n # CPU or GPU\n self.gpuid = args.gpuid if 'gpuid' in args else -1\n self.xp = cuda.cupy if self.gpuid >= 0 else np\n\n # Load integer id mappings\n if 'path' in args:\n self.set_paths(args.path)\n elif 'mpath' in args:\n self.mpath = args.mpath\n # set the workspace path to two levels up\n path = Path(self.mpath).parents[1]\n self.set_paths(str(path))\n self.model_path = self.mpath\n else:\n raise Exception(\n \"It was impossible to determine the workspace path. Use \\'path\\' or \\'mparh\\' arguments!\")\n\n self.w2i = pickle.load(open(self.w2i_path, \"rb\"))\n self.i2w = pickle.load(open(self.i2w_path, \"rb\"))\n self.vocab_size_en = min(len(self.i2w[\"en\"]), max_vocab_size[\"en\"])\n self.vocab_size_fr = min(len(self.i2w[\"fr\"]), max_vocab_size[\"fr\"])\n print(\"vocab size, en={0:d}, fr={1:d}\".format(\n self.vocab_size_en, self.vocab_size_fr))\n\n self.model, self.optimizer = self.get_model()\n\n def set_paths(self, input_path):\n input_dir = os.path.join(input_path)\n\n self.bucket_data_fname = os.path.join(input_dir, \"buckets_{0:d}.list\")\n self.tokens_fname = os.path.join(input_dir, \"tokens.list\")\n self.w2i_path = os.path.join(input_dir, \"w2i.dict\")\n self.i2w_path = os.path.join(input_dir, \"i2w.dict\")\n self.text_fname = {\"en\": os.path.join(\n input_dir, \"text.en\"), \"fr\": os.path.join(input_dir, \"text.fr\")}\n self.model_path = self.get_model_dirname(input_path)\n\n def get_model_dirname(self, input_path):\n '''Generates directory name based on the model attributes.'''\n\n dir_name = \"{0}_e{1}\".format(\n self.exp_name,\n self.epochs_num)\n\n model_path = os.path.join(input_path, \"models\", dir_name)\n print(\"Setting model path to:\\n\\t{0:s}\".format(model_path))\n print(\"{0:s}\".format(\"-\" * 50))\n\n return model_path\n\n def get_logpaths(self):\n log_train_fil_name = os.path.join(\n self.model_path, \"train.log\".format())\n log_dev_fil_name = os.path.join(\n self.model_path, \"dev.log\".format())\n\n return log_train_fil_name, log_dev_fil_name\n\n def get_model(self):\n '''Set up model'''\n model = EncoderDecoder(self.vocab_size_fr, self.vocab_size_en,\n num_layers_enc, num_layers_dec,\n hidden_units, self.gpuid, attn=use_attn)\n if self.gpuid >= 0:\n cuda.get_device(self.gpuid).use()\n model.to_gpu()\n\n optimizer = optimizers.Adam()\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.GradientClipping(threshold=5))\n\n return model, optimizer\n\n def compute_prec_recall(self):\n metrics = self.predict(s=NUM_TRAINING_SENTENCES,\n num=NUM_DEV_SENTENCES, display=False, plot=False)\n\n prec = np.sum(metrics[\"cp\"]) / np.sum(metrics[\"tp\"])\n rec = np.sum(metrics[\"cp\"]) / np.sum(metrics[\"t\"])\n f_score = 2 * (prec * rec) / (prec + rec)\n\n print(\"{0:s}\".format(\"-\" * 50))\n print(\"{0:s} | 
{1:0.4f}\".format(\"precision\", prec))\n print(\"{0:s} | {1:0.4f}\".format(\"recall\", rec))\n print(\"{0:s} | {1:0.4f}\".format(\"f1\", f_score))\n\n def compute_dev_pplx(self):\n loss = 0\n num_words = 0\n with open(self.text_fname[\"fr\"], \"rb\") as fr_file, open(self.text_fname[\"en\"], \"rb\") as en_file:\n with tqdm(total=NUM_DEV_SENTENCES) as pbar:\n sys.stderr.flush()\n out_str = \"loss={0:.6f}\".format(0)\n pbar.set_description(out_str)\n for i, (line_fr, line_en) in enumerate(zip(fr_file, en_file), start=1):\n if i > NUM_TRAINING_SENTENCES and i <= (NUM_TRAINING_SENTENCES + NUM_DEV_SENTENCES):\n fr_sent = line_fr.strip().split()\n en_sent = line_en.strip().split()\n\n fr_ids = [self.w2i[\"fr\"].get(\n w, UNK_ID) for w in fr_sent]\n en_ids = [self.w2i[\"en\"].get(\n w, UNK_ID) for w in en_sent]\n\n # compute loss\n curr_loss = float(self.model.encode_decode_train(\n fr_ids, en_ids, train=False).data)\n loss += curr_loss\n num_words += len(en_ids)\n\n out_str = \"loss={0:.6f}\".format(curr_loss)\n pbar.set_description(out_str)\n pbar.update(1)\n\n # end of for\n # end of pbar\n # end of with open file\n loss_per_word = loss / num_words\n pplx = 2 ** loss_per_word\n # random_pplx = self.vocab_size_en\n\n print(\"{0:s}\".format(\"-\" * 50))\n print(\"{0:s} | {1:0.6f}\".format(\"dev perplexity\", pplx))\n print(\"{0:s} | {1:6d}\".format(\"# words in dev\", num_words))\n print(\"{0:s}\".format(\"-\" * 50))\n\n return pplx\n\n # ### Evaluation\n #\n # Bleu score\n\n def bleu_stats(self, hypothesis, reference):\n yield len(hypothesis)\n yield len(reference)\n for n in range(1, 5):\n s_ngrams = Counter([tuple(hypothesis[i:i + n])\n for i in range(len(hypothesis) + 1 - n)])\n r_ngrams = Counter([tuple(reference[i:i + n])\n for i in range(len(reference) + 1 - n)])\n yield max([sum((s_ngrams & r_ngrams).values()), 0])\n yield max([len(hypothesis) + 1 - n, 0])\n\n # Compute BLEU from collected statistics obtained by call(s) to bleu_stats\n def bleu(self, stats):\n if len(list(filter(lambda x: x == 0, stats))) > 0:\n return 0\n (c, r) = stats[:2]\n log_bleu_prec = sum([math.log(float(x) / y)\n for x, y in zip(stats[2::2], stats[3::2])]) / 4.\n return math.exp(min([0, 1 - float(r) / c]) + log_bleu_prec)\n\n def compute_dev_bleu(self):\n list_of_references = []\n list_of_hypotheses = []\n with open(self.text_fname[\"fr\"], \"rb\") as fr_file, open(self.text_fname[\"en\"], \"rb\") as en_file:\n with tqdm(total=NUM_DEV_SENTENCES) as pbar:\n sys.stderr.flush()\n for i, (line_fr, line_en) in enumerate(zip(fr_file, en_file), start=1):\n if i > NUM_TRAINING_SENTENCES and i <= (NUM_TRAINING_SENTENCES + NUM_DEV_SENTENCES):\n\n # out_str = \"predicting sentence={0:d}\".format(i)\n pbar.update(1)\n\n fr_sent = line_fr.strip().split()\n # en_sent = line_en.strip().split()\n\n fr_ids = [self.w2i[\"fr\"].get(\n w, UNK_ID) for w in fr_sent]\n # en_ids = [self.w2i[\"en\"].get(\n # w, UNK_ID) for w in en_sent]\n\n # list_of_references.append(line_en.strip().split().decode())\n reference_words = [w.decode()\n for w in line_en.strip().split()]\n list_of_references.append(reference_words)\n pred_sent, alpha_arr = self.model.encode_decode_predict(\n fr_ids)\n pred_words = [self.i2w[\"en\"][w].decode()\n for w in pred_sent if w != EOS_ID]\n # pred_sent_line = \" \".join(pred_words)\n # list_of_hypotheses.append(pred_sent_line)\n list_of_hypotheses.append(pred_words)\n if i > (NUM_TRAINING_SENTENCES + NUM_DEV_SENTENCES):\n break\n\n stats = [0 for i in range(10)]\n for (r, h) in zip(list_of_references, 
\n def compute_dev_bleu(self):\n list_of_references = []\n list_of_hypotheses = []\n with open(self.text_fname[\"fr\"], \"rb\") as fr_file, open(self.text_fname[\"en\"], \"rb\") as en_file:\n with tqdm(total=NUM_DEV_SENTENCES) as pbar:\n sys.stderr.flush()\n for i, (line_fr, line_en) in enumerate(zip(fr_file, en_file), start=1):\n if i > NUM_TRAINING_SENTENCES and i <= (NUM_TRAINING_SENTENCES + NUM_DEV_SENTENCES):\n\n pbar.update(1)\n\n fr_sent = line_fr.strip().split()\n\n fr_ids = [self.w2i[\"fr\"].get(\n w, UNK_ID) for w in fr_sent]\n\n reference_words = [w.decode()\n for w in line_en.strip().split()]\n list_of_references.append(reference_words)\n pred_sent, alpha_arr = self.model.encode_decode_predict(\n fr_ids)\n pred_words = [self.i2w[\"en\"][w].decode()\n for w in pred_sent if w != EOS_ID]\n list_of_hypotheses.append(pred_words)\n if i > (NUM_TRAINING_SENTENCES + NUM_DEV_SENTENCES):\n break\n\n stats = [0 for i in range(10)]\n for (r, h) in zip(list_of_references, list_of_hypotheses):\n stats = [sum(scores)\n for scores in zip(stats, self.bleu_stats(h, r))]\n print(\"BLEU: %0.2f\" % (100 * self.bleu(stats)))\n\n return (100 * self.bleu(stats))\n\n # ### Training loop\n def batch_train_loop(self, model_path, bucket_fname, num_epochs,\n batch_size=10, num_buckets=NUM_BUCKETS,\n num_training=2,\n bucket_width=BUCKET_WIDTH, log_mode=\"a\", last_epoch_id=0):\n\n log_train_fil_name, log_dev_fil_name = self.get_logpaths()\n\n # Set up log file for loss\n log_train_fil = open(log_train_fil_name, mode=log_mode)\n log_train_csv = csv.writer(log_train_fil, lineterminator=\"\\n\")\n log_dev_fil = open(log_dev_fil_name, mode=log_mode)\n\n sys.stderr.flush()\n\n def load_bucket_data():\n for buck_indx in range(num_buckets):\n bucket_data = pickle.load(\n open(self.bucket_data_fname.format(buck_indx + 1), \"rb\"))\n buck_pad_lim = (buck_indx + 1) * bucket_width\n\n for i in range(0, len(bucket_data), batch_size):\n minibatch = bucket_data[i:i + batch_size]\n yield (minibatch, buck_pad_lim)\n\n for epoch in range(num_epochs):\n train_count = 0\n with tqdm(total=num_training) as pbar:\n sys.stderr.flush()\n loss_per_epoch = 0\n out_str = \"epoch={0:d}, iter={1:d}, loss={2:.4f}, mean loss={3:.4f}, bucket={4:d}\".format(\n epoch + 1, 0, 0, 0, 0)\n pbar.set_description(out_str)\n\n bucket_data = list(load_bucket_data())\n random.shuffle(bucket_data)\n\n for idx, val in enumerate(bucket_data):\n minibatch, padding = val\n if train_count >= num_training:\n break\n\n curr_len = len(minibatch)\n\n loss = self.model.encode_decode_train_batch(\n minibatch, padding, padding)\n train_count += curr_len\n\n # set up for backprop\n self.model.cleargrads()\n loss.backward()\n # update parameters\n self.optimizer.update()\n # store loss value for display\n loss_val = float(loss.data)\n loss_per_epoch += loss_val\n\n # running sentence count, not just the size of the last minibatch\n it = (epoch * NUM_TRAINING_SENTENCES) + train_count\n\n out_str = \"epoch={0:d}, iter={1:d}, loss={2:.4f}, mean loss={3:.4f}\".format(\n epoch + 1, it, loss_val, (loss_per_epoch / (idx + 1)))\n pbar.set_description(out_str)\n pbar.update(curr_len)\n\n # log every 10 batches\n if idx % 10 == 0:\n log_train_csv.writerow([it, loss_val])\n\n print(\"finished training on {0:d} sentences\".format(num_training))\n print(\"{0:s}\".format(\"-\" * 50))\n\n self.save_model(os.path.join(\n model_path, 'epochs'), suffix=\"_\" + str(epoch + 1))\n\n # write DONE flag\n open(os.path.join(model_path, 'epochs',\n \"{0}.done\".format(epoch + 1)), 'a').close()\n\n # print(\"computing perplexity\")\n # pplx_new = compute_dev_pplx()\n # print(\"Saving model\")\n # serializers.save_npz(model_fil.replace(\n # \".model\", \"_{0:d}.model\".format(last_epoch_id + epoch + 1)), model)\n # print(\"Finished saving model\")\n # pplx = pplx_new\n # print(log_train_fil_name)\n # print(log_dev_fil_name)\n # print(model_fil.replace(\".model\", \"_{0:d}.model\".format(epoch + 1)))\n\n # if epoch % 2 == 0:\n # bleu_score = compute_dev_bleu()\n #\n # # log pplx and bleu score\n # log_dev_csv.writerow(\n # [(last_epoch_id + epoch + 1), pplx_new, bleu_score])\n # log_train_fil.flush()\n # log_dev_fil.flush()\n # print(\"Simple predictions (╯°□°)╯︵ ┻━┻\")\n # print(\"training set predictions\")\n # _ = predict(s=0, num=2, plot=False)\n # print(\"Simple predictions (╯°□°)╯︵ ┻━┻\")\n # print(\"dev set predictions\")\n # _ = predict(s=NUM_TRAINING_SENTENCES, num=3, plot=False)\n # print(\"{0:s}\".format(\"-\" * 50))\n # compute_dev_bleu()\n # print(\"{0:s}\".format(\"-\" * 50))\n #\n # print(\"Final saving model\")\n 
# serializers.save_npz(model_fil, model)\n # print(\"Finished saving model\")\n\n # Backup model\n self.save_model(model_path)\n\n # close log file\n log_train_fil.close()\n log_dev_fil.close()\n\n # ### Utilities\n\n def plot_attention(self, alpha_arr, fr, en, plot_name=None):\n if self.gpuid >= 0:\n alpha_arr = cuda.to_cpu(alpha_arr).astype(np.float32)\n\n fig = plt.figure()\n fig.set_size_inches(8, 8)\n\n gs = gridspec.GridSpec(2, 2, width_ratios=[\n 12, 1], height_ratios=[12, 1])\n\n ax = plt.subplot(gs[0])\n ax_c = plt.subplot(gs[1])\n\n cmap = sns.light_palette((200, 75, 60), input=\"husl\", as_cmap=True)\n ax = sns.heatmap(alpha_arr, xticklabels=fr, yticklabels=en,\n ax=ax, cmap=cmap, cbar_ax=ax_c)\n\n ax.xaxis.tick_top()\n ax.yaxis.tick_right()\n\n ax.set_xticklabels(en, minor=True, rotation=60, size=12)\n for label in ax.get_xticklabels(minor=False):\n label.set_fontsize(12)\n # label.set_font_properties(prop)\n\n for label in ax.get_yticklabels(minor=False):\n label.set_fontsize(12)\n label.set_rotation(-90)\n label.set_horizontalalignment('left')\n\n ax.set_xlabel(\"Source\", size=20)\n ax.set_ylabel(\"Hypothesis\", size=20)\n\n if plot_name:\n fig.savefig(plot_name, format=\"png\")\n\n def predict_sentence(self, line_num, line_fr, line_en=None, display=True, plot_name=None, p_filt=0, r_filt=0):\n fr_sent = line_fr.strip().split()\n fr_ids = [self.w2i[\"fr\"].get(w, UNK_ID) for w in fr_sent]\n # english reference is optional. If provided, compute precision/recall\n if line_en:\n en_sent = line_en.strip().split()\n en_ids = [self.w2i[\"en\"].get(w, UNK_ID) for w in en_sent]\n\n pred_ids, alpha_arr = self.model.encode_decode_predict(fr_ids)\n pred_words = [self.i2w[\"en\"][w].decode() for w in pred_ids]\n\n prec = 0\n rec = 0\n filter_match = False\n\n matches = self.count_match(en_ids, pred_ids)\n if EOS_ID in pred_ids:\n pred_len = len(pred_ids) - 1\n else:\n pred_len = len(pred_ids)\n # subtract 1 from length for EOS id\n prec = (matches / pred_len) if pred_len > 0 else 0\n rec = matches / len(en_ids)\n\n if display and (prec >= p_filt and rec >= r_filt):\n filter_match = True\n # convert raw binary into string\n fr_words = [w.decode() for w in fr_sent]\n\n print(\"{0:s}\".format(\"-\" * 50))\n print(\"sentence: {0:d}\".format(line_num))\n print(\"{0:s} | {1:80s}\".format(\"Src\", line_fr.strip().decode()))\n print(\"{0:s} | {1:80s}\".format(\"Ref\", line_en.strip().decode()))\n print(\"{0:s} | {1:80s}\".format(\"Hyp\", \" \".join(pred_words)))\n\n print(\"{0:s}\".format(\"-\" * 50))\n\n print(\"{0:s} | {1:0.4f}\".format(\"precision\", prec))\n print(\"{0:s} | {1:0.4f}\".format(\"recall\", rec))\n\n if plot_name and use_attn:\n self.plot_attention(alpha_arr, fr_words, pred_words, plot_name)\n\n return matches, len(pred_ids), len(en_ids), filter_match\n\n def predict(self, s=NUM_TRAINING_SENTENCES, num=NUM_DEV_SENTENCES, display=True, plot=False, p_filt=0, r_filt=0):\n print(\"English predictions, s={0:d}, num={1:d}:\".format(s, num))\n\n metrics = {\"cp\": [], \"tp\": [], \"t\": []}\n\n filter_count = 0\n\n with open(self.text_fname[\"fr\"], \"rb\") as fr_file, open(self.text_fname[\"en\"], \"rb\") as en_file:\n for i, (line_fr, line_en) in enumerate(zip(fr_file, en_file), start=0):\n if i >= s and i < (s + num):\n if plot:\n plot_name = os.path.join(\n self.model_path, \"sample_{0:d}_plot.png\".format(i + 1))\n else:\n plot_name = None\n\n # make prediction\n cp, tp, t, f = self.predict_sentence(i, line_fr,\n line_en,\n display=display,\n plot_name=plot_name,\n p_filt=p_filt, 
r_filt=r_filt)\n metrics[\"cp\"].append(cp)\n metrics[\"tp\"].append(tp)\n metrics[\"t\"].append(t)\n filter_count += (1 if f else 0)\n\n print(\"sentences matching filter = {0:d}\".format(filter_count))\n return metrics\n\n def count_match(self, list1, list2):\n # each list can have repeated elements. The count should account for\n # this.\n count1 = Counter(list1)\n count2 = Counter(list2)\n count2_keys = count2.keys() - set([UNK_ID, EOS_ID])\n common_w = set(count1.keys()) & set(count2_keys)\n matches = sum([min(count1[w], count2[w]) for w in common_w])\n return matches\n\n def train_start(self):\n max_epoch_id = 0\n # if os.path.exists(model_path):\n # # check last saved epoch model:\n # for fname in [f for f in os.listdir(model_dir) if f.endswith(\"\")]:\n # if model_path != os.path.join(model_dir, fname) and model_path.replace(\".model\", \"\") in os.path.join(model_dir, fname):\n # try:\n # epoch_id = int(fname.split(\n # \"_\")[-1].replace(\".model\", \"\"))\n # if epoch_id > max_epoch_id:\n # max_epoch_id = epoch_id\n # except:\n # print(\"{0:s} not a valid model file\".format(fname))\n # print(\"last saved epoch model={0:d}\".format(max_epoch_id))\n #\n # if load_existing_model:\n # print(\"loading model ...\")\n # serializers.load_npz(model_path, self.model)\n # print(\"finished loading: {0:s}\".format(model_path))\n # else:\n # print(\"\"\"model file already exists!!\n # Delete before continuing, or enable load_existing flag\"\"\".format(model_path))\n # return\n\n if not os.path.exists(self.model_path):\n print(\"Creating folder: {0}\".format(self.model_path))\n os.makedirs(self.model_path)\n else:\n recreate_folder(self.model_path)\n os.makedirs(os.path.join(self.model_path, 'epochs'))\n\n if self.epochs_num > 0:\n self.batch_train_loop(self.model_path, self.bucket_data_fname,\n num_epochs=self.epochs_num,\n batch_size=BATCH_SIZE,\n num_buckets=NUM_BUCKETS,\n num_training=NUM_TRAINING_SENTENCES,\n bucket_width=BUCKET_WIDTH, last_epoch_id=max_epoch_id)\n\n print(\"Done.\")\n\n def load_model(self, suffix=None):\n if suffix is None:\n filepath_model = os.path.join(self.mpath, 'model.npz')\n else:\n filepath_model = os.path.join(\n self.mpath, 'epochs', 'model{0}.npz'.format(suffix))\n\n if not os.path.exists(filepath_model):\n raise Exception('Model file not found: {0}'.format(filepath_model))\n\n print(\"Loading model ...\")\n serializers.load_npz(filepath_model, self.model)\n print(\"Finished loading: {0:s}\".format(filepath_model))\n print(\"{0:s}\".format(\"-\" * 50))\n\n def save_model(self, path, suffix=\"\"):\n filepath_model = os.path.join(path, 'model{0}.npz'.format(suffix))\n serializers.save_npz(filepath_model, self.model)\n print(\"Finished saving data to: {0}\".format(filepath_model))\n print(\"{0:s}\".format(\"-\" * 50))\n\n\n#########\n# UTILS\n#########\n\n\ndef recreate_folder(folder_path):\n if os.path.exists(folder_path):\n yes = set(['yes', 'y'])\n no = set(['no', 'n', ''])\n\n while(True):\n choice = input(\n \"A folder exists. Do you want to delete {0}? 
[y/N] \".format(folder_path)).lower()\n if choice in yes:\n shutil.rmtree(folder_path)\n break\n elif choice in no:\n break\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n#########\n# RUN\n#########\n\n\ndef main_train(args):\n \"\"\"Subparser: train\n\n Start training a model with specified attributes\n \"\"\"\n def print_args(args):\n print(\"SETTINGS\")\n print(\">>> Experiment name: {0}\".format(args.exp))\n print(\">>> Workspace path: {0}\".format(args.path))\n print(\">>> Number of epochs: {0}\".format(args.epochs))\n print(\">>> GPU: {0}\".format(\"Off\" if args.gpuid < 0 else args.gpuid))\n print(\">>> Log level: {0}\".format(args.log))\n\n print(\"\\n{0:s}\\n\".format(\"-\" * 50))\n\n print_args(args)\n\n if args.epochs <= 0:\n raise Exception(\"Invalid number of epochs\")\n\n t = Translator(args)\n t.train_start()\n\n\ndef main_show(args):\n global NUM_DEV_SENTENCES\n if not args.smalldev:\n NUM_DEV_SENTENCES = NUM_SENTENCES - NUM_TRAINING_SENTENCES\n\n show_categories = args.fun.strip().lower().split(',')\n t = Translator(args)\n\n if args.epoch >= 0:\n t.load_model(\"_\" + str(args.epoch))\n\n if 'ts' in show_categories:\n print(\"Training set predictions\")\n t.predict(s=0, num=args.num, plot=True)\n print(\"{0:s}\".format(\"-\" * 50))\n\n if 'ds' in show_categories:\n print(\"dev set predictions\")\n t.predict(s=NUM_TRAINING_SENTENCES, num=args.num, plot=True)\n print(\"{0:s}\".format(\"-\" * 50))\n print(\"{0:s}\".format(\"-\" * 50))\n\n if 'bleu' in show_categories:\n print(\"computing bleu\")\n t.compute_dev_bleu()\n print(\"finished computing bleu ... \")\n print(\"{0:s}\".format(\"-\" * 50))\n\n if 'ppx' in show_categories:\n print(\"computing perplexity\")\n t.compute_dev_pplx()\n print(\"finished computing perplexity ... \")\n print(\"{0:s}\".format(\"-\" * 50))\n\n\ndef main_listen(args):\n def get_all_files_in_path(path, extension):\n listedfiles = [f for f in glob.glob(\n os.path.join(path, '*.' + extension))]\n return listedfiles\n\n global NUM_DEV_SENTENCES\n if not args.smalldev:\n NUM_DEV_SENTENCES = NUM_SENTENCES - NUM_TRAINING_SENTENCES\n\n show_categories = args.fun.strip().lower().split(',')\n used_files = set()\n printed = False\n while True:\n listed_files = get_all_files_in_path(\n os.path.join(args.mpath, 'epochs'), 'done')\n for done_file in listed_files:\n epoch_num = os.path.basename(done_file).split('.')[0]\n model_file = os.path.join(os.path.dirname(\n done_file), \"model_{0}.npz\".format(epoch_num))\n if model_file in used_files:\n continue\n\n printed = False\n used_files.add(model_file)\n\n with open(os.path.join(args.mpath, \"listen.out\"), \"a\") as f:\n f.write(\"{0}\\n\".format(model_file))\n if 'bleu' in show_categories:\n t = Translator(args)\n t.load_model(\"_\" + epoch_num)\n print(\"computing bleu\")\n bleu = t.compute_dev_bleu()\n f.write(\"BLEU: {0:.2f}\\n\".format(bleu))\n print(\"finished computing bleu ... \")\n print(\"{0:s}\".format(\"-\" * 50))\n\n if 'ppx' in show_categories:\n t = Translator(args)\n t.load_model(\"_\" + epoch_num)\n print(\"computing perplexity\")\n ppx = t.compute_dev_pplx()\n f.write(\"PPX: {0}\\n\".format(ppx))\n print(\"finished computing perplexity ... 
\")\n print(\"{0:s}\".format(\"-\" * 50))\n\n f.write(\"\\n\")\n\n if not printed:\n print(\"Waiting for new files...\")\n printed = True\n time.sleep(5)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n train_parser = subparsers.add_parser(\"train\")\n show_parser = subparsers.add_parser(\"show\")\n listen_parser = subparsers.add_parser(\"listen\")\n\n # train commands\n train_parser.add_argument('--exp', type=str,\n default=\"exp\", help='Experiment name')\n train_parser.add_argument('--path', type=str, help='Workspace path')\n train_parser.add_argument('--epochs', type=int,\n default=1, help='Number of epochs')\n train_parser.add_argument('--gpuid', type=int,\n default=-1, help='GPU enabled')\n train_parser.add_argument('--log', type=int,\n default=0, help='Log level')\n\n # show/analyse commands\n show_parser.add_argument(\n '--mpath', type=str, help='Model path')\n show_parser.add_argument(\n '--fun', type=str, default=\"bleu\", help='batch size')\n show_parser.add_argument(\n '--num', type=int, default=3, help='Number of results')\n show_parser.add_argument(\n '--epoch', type=int, default=-1, help='Finds a specific epoch if exists')\n show_parser.add_argument(\n '--smalldev', type=bool, default=False, help='Sets devset size to 500')\n\n # listen commands\n listen_parser.add_argument('--mpath', type=str, help='Model path')\n listen_parser.add_argument(\n '--fun', type=str, default=\"bleu,ppx\", help='batch size')\n listen_parser.add_argument(\n '--smalldev', type=bool, default=False, help='Sets devset size to 500')\n\n # parse\n args = parser.parse_args()\n\n if args.command == \"train\":\n main_train(args)\n elif args.command == \"show\":\n main_show(args)\n elif args.command == \"listen\":\n main_listen(args)\n else:\n raise Exception(\"Target not found!\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"nmt_translate.py","file_name":"nmt_translate.py","file_ext":"py","file_size_in_byte":27546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"213821665","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom quote import urls\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^quote/', include('quote.urls')),\n url(r'^provider/', include('provider.urls')),\n url(r'^product/', include('product.urls')),\n url(r'^location/', include('location.urls')),\n]","sub_path":"simuladoresWEB/simuladoresWEB/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"22613042","text":"\n# ref https://github.com/wkentaro/labelme/blob/master/labelme/cli/json_to_dataset.py\n\nimport json\nimport os\nimport os.path as osp\n\nimport imgviz\nimport PIL.Image\nimport numpy as np\n\nfrom labelme import utils\n\nJSON_DIR = './labelme/data/labeled/'\nSAVE_DIR = './labelme/data/dataset/'\n\ndef main(JSON_DIR, SAVE_DIR):\n\n # read .json file list\n _, _, jsons = next(os.walk(JSON_DIR))\n jsons = [s for s in jsons if \".json\" in s]\n \n # take the label_names.txt\n with open(osp.join(JSON_DIR, \"label_names.txt\"), \"r\") as f:\n cnt = 0\n label_name_to_value = {}\n for line in f:\n label_name_to_value[line.rstrip('\\n')] = cnt\n cnt += 1\n \n for json_file in jsons:\n \n # read json\n data = json.load(open(JSON_DIR + json_file))\n \n # read image\n imageData = data.get(\"imageData\")\n \n if not imageData:\n imagePath = os.path.join(JSON_DIR, data[\"imagePath\"])\n img = np.asarray(PIL.Image.open(imagePath))\n else:\n img = utils.img_b64_to_arr(imageData)\n \n with open(osp.join(JSON_DIR, \"label_names.txt\"), \"r\") as f:\n cnt = 0\n label_name_to_value = {}\n for line in f:\n label_name_to_value[line.rstrip('\\n')] = cnt\n cnt += 1\n \n # make a label data\n lbl, _ = utils.shapes_to_label(\n img.shape, data[\"shapes\"], label_name_to_value\n )\n \n # make a viz data\n label_names = [None] * (max(label_name_to_value.values()) + 1)\n for name, value in label_name_to_value.items():\n label_names[value] = name\n \n lbl_viz = imgviz.label2rgb(\n label=lbl, img=imgviz.asgray(img), label_names=label_names, loc=\"rb\"\n )\n \n # save dataset\n _, name, _ = json_file.replace('.', '_').split('_')\n \n PIL.Image.fromarray(img).save(osp.join(SAVE_DIR, \"img_\" + name + \".png\"))\n utils.lblsave(osp.join(SAVE_DIR, \"label_\" + name + \".png\"), lbl)\n PIL.Image.fromarray(lbl_viz).save(osp.join(SAVE_DIR, \"viz_\" + name + \".png\"))\n \n with open(osp.join(SAVE_DIR, \"label_names.txt\"), \"w\") as f:\n for lbl_name in label_names:\n f.write(lbl_name + \"\\n\")\n \nif __name__ == \"__main__\":\n\tmain(JSON_DIR, SAVE_DIR)\n","sub_path":"labelme/json2dataset.py","file_name":"json2dataset.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"118618895","text":"import unittest\nfrom Products.CMFDefault.utils import parseHeadersBody\nfrom string import split\n\nclass DefaultUtilsTests(unittest.TestCase):\n COMMON_HEADERS = '''Author: Tres Seaver\nTitle: Test Products.PTKDemo.utils.parseHeadersBody'''\n\n MULTILINE_DESCRIPTION = '''Description: this description spans\n multiple lines.'''\n\n TEST_BODY = '''Body goes here, and can span multiple\nlines. It can even include \"headerish\" lines, like:\n\nHeader: value\n'''\n\n def setUp( self ):\n get_transaction().begin()\n\n def tearDown( self ):\n get_transaction().abort()\n \n def test_NoBody( self ):\n headers, body = parseHeadersBody( '%s\\n\\n' % self.COMMON_HEADERS )\n assert( len( headers ) == 2, '%d!' % len( headers ) )\n assert( 'Author' in headers.keys() )\n assert( headers[ 'Author' ] == 'Tres Seaver' )\n assert( 'Title' in headers.keys() )\n assert( len( body ) == 0, '%d!' % len( body ) )\n\n def test_Continuation( self ):\n headers, body = parseHeadersBody( '%s\\n%s\\n\\n'\n % ( self.COMMON_HEADERS\n , self.MULTILINE_DESCRIPTION\n )\n )\n assert( len( headers ) == 3, '%d!' % len( headers ) )\n assert( 'Description' in headers.keys() )\n desc_len = len( split( headers[ 'Description' ], '\\n' ) )\n assert( desc_len == 2, '%d!' % desc_len )\n assert( len( body ) == 0, '%d!' % len( body ) )\n \n def test_Body( self ):\n headers, body = parseHeadersBody( '%s\\n\\n%s'\n % ( self.COMMON_HEADERS\n , self.TEST_BODY\n )\n )\n assert( len( headers ) == 2, '%d!' % len( headers ) )\n assert( body == self.TEST_BODY )\n \n def test_Preload( self ):\n preloaded = { 'Author' : 'xxx', 'text_format' : 'structured_text' }\n headers, body = parseHeadersBody( '%s\\n%s\\n\\n%s'\n % ( self.COMMON_HEADERS\n , self.MULTILINE_DESCRIPTION\n , self.TEST_BODY\n )\n , preloaded\n )\n assert( len( headers ) == 3, '%d!' % len( headers ) )\n assert( preloaded[ 'Author' ] != headers[ 'Author' ] )\n assert( preloaded[ 'text_format' ] == headers[ 'text_format' ] )\n\ndef test_suite():\n return unittest.makeSuite(DefaultUtilsTests)\n\nif __name__ == '__main__':\n result = unittest.TextTestRunner().run(test_suite())\n\n","sub_path":"CMF/tags/CMF-1_2-release/CMFDefault/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"389762687","text":"class Solution:\n # @param A : integer\n # @return an integer\n def reverse(self, A):\n negative_num = False\n if A < 0:\n A = -A\n negative_num = True\n remainder = 0\n while A:\n remainder = remainder*10 + A%10\n if remainder > (2**31)-1 or remainder < -(2**31):\n return 0\n A = A // 10\n if negative_num:\n remainder = -remainder\n return remainder\n\n\n\ns = Solution()\nprint(s.reverse(-1167285))\n","sub_path":"Algos/maths_algos/reverseInteger.py","file_name":"reverseInteger.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"174323359","text":"__author__ = 'Matt David'\n\nfrom attrdict import AttrDict\nfrom datetime import datetime, timedelta\n\nfrom addressimo.config import config\n\n# Used Datapoints:\n#\n# id\n#\n# bip70_static_amount\n# bip70_enabled\n# last_generated_index\n# last_used_index\n# master_public_key\n# master_public_key_source\n# private_key\n# private_key_source\n# private_key_id\n# x509_cert\n# x509_cert_source\n# paymentprotocol_only\n\nclass IdObject(AttrDict):\n\n def __init__(self, id=None):\n\n # Set ID\n self.id = id\n\n # Set Defaults\n self.bip32_enabled = False\n self.bip70_static_amount = None\n self.bip70_enabled = False\n self.paymentprotocol_only = False\n self.wallet_address = None\n self.expires = 0\n self.memo = None\n self.master_public_key = None\n self.private_key = None\n self.x509_cert = None\n self.payment_url = None\n self.merchant_data = None\n self.presigned_payment_requests = []\n self.presigned_only = False\n self.auth_public_key = None\n\n def get_expires(self):\n if self.expires:\n if isinstance(self.expires, int) or isinstance(self.expires, long):\n return int((datetime.utcnow() + timedelta(seconds=self.expires)).strftime('%s'))\n elif isinstance(self.expires, datetime):\n return int(self.expires.strftime('%s'))\n else:\n return int((datetime.utcnow() + timedelta(seconds=config.bip70_default_expiration)).strftime('%s'))\n","sub_path":"addressimo/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"116210799","text":"import numpy as np\nimport cv2\nfrom .lane_line import LaneLine\n\nclass LaneDetector:\n def __init__(self):\n # window settings\n self.window_height = 120 # Break image into 9 vertical layers since image height is 720\n self.window_width = 80\n self.margin = 80 # How much to slide left and right for searching\n self.lane_left = LaneLine();\n self.lane_right = LaneLine();\n\n def window_mask(self, img_ref, center, level):\n width = self.window_width\n height = self.window_height\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1\n return output\n \n def find_window_centroids(self, warped):\n window_width = self.window_width;\n window_height = self.window_height;\n margin = self.margin;\n \n window_centroids = [] # Store the (left,right) window centroid positions per level\n window = np.ones(window_width) # Create our window template that we will use for convolutions\n\n # First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice\n # and then np.convolve the vertical image slice with the window template \n\n # Sum quarter bottom of image to get slice, could use a different ratio\n l_sum = np.sum(warped[int(3*warped.shape[0]/4):,:int(warped.shape[1]/2)], axis=0)\n l_center = np.argmax(np.convolve(window,l_sum))-window_width/2\n r_sum = np.sum(warped[int(3*warped.shape[0]/4):,int(warped.shape[1]/2):], axis=0)\n r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(warped.shape[1]/2)\n l_confidence = 1;\n r_confidence = 1;\n\n # Add what we found for the first layer\n window_centroids.append((window_height/2, l_center, r_center, l_confidence, r_confidence))\n\n # Go through each layer looking for max pixel locations\n for level in range(1,(int)(warped.shape[0]/window_height)):\n # convolve the window into the vertical slice of the image\n image_layer = np.sum(warped[int(warped.shape[0]-(level+1)*window_height):int(warped.shape[0]-level*window_height),:], axis=0)\n conv_signal = np.convolve(window, image_layer)\n # Find the best left centroid by using past left center as a reference\n # Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window\n offset = window_width/2\n l_min_index = int(max(l_center+offset-margin,0))\n l_max_index = int(min(l_center+offset+margin,warped.shape[1]))\n l_center = np.argmax(conv_signal[l_min_index:l_max_index])+l_min_index-offset\n l_confidence = np.sum(conv_signal[l_min_index:l_max_index]/np.sum(conv_signal))\n\n # Find the best right centroid by using past right center as a reference\n r_min_index = int(max(r_center+offset-margin,0))\n r_max_index = int(min(r_center+offset+margin,warped.shape[1]))\n r_center = np.argmax(conv_signal[r_min_index:r_max_index])+r_min_index-offset\n r_confidence = np.sum(conv_signal[r_min_index:r_max_index]/np.sum(conv_signal))\n\n # Add what we found for that layer\n window_centroids.append((window_height/2 + window_height*level, l_center, r_center, l_confidence, r_confidence))\n\n return window_centroids\n\n def draw_overlay_image(self, binary_warp, image_warp, window_centroids, outlier_l, outlier_r):\n\n #assert binary_warp.shape[2] == 1, \"binary_warp not binary\"\n \n # If we found any window centers\n if len(window_centroids) > 0:\n\n # Points used to draw all the left and right windows\n l_points = np.zeros_like(binary_warp)\n 
r_points = np.zeros_like(binary_warp)\n\n # Go through each level and draw the windows \n for level in range(0,len(window_centroids)):\n # Window_mask is a function to draw window areas\n l_mask = self.window_mask(binary_warp,window_centroids[level][1],level)\n r_mask = self.window_mask(binary_warp,window_centroids[level][2],level)\n # Add graphic points from window mask here to total pixels found \n l_points[(l_points == 255) | ((l_mask == 1) )] = 50 + 200*outlier_l[level];\n r_points[(r_points == 255) | ((r_mask == 1) )] = 50 + 200*outlier_r[level];\n\n # Draw the results\n template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together\n zero_channel = np.zeros_like(template) # create a zero color channle \n template = np.array(cv2.merge((zero_channel,template,zero_channel)), np.uint8) # make window pixels green\n warpage = np.array(cv2.merge((binary_warp,binary_warp,binary_warp)), np.uint8) # making the original road pixels 3 color channels\n output1 = cv2.addWeighted(warpage*255, 1, template, 0.5, 0.0) # overlay the orignal road image with window results \n output2 = cv2.addWeighted(image_warp, 1, template, 0.5, 0.0) # overlay the orignal road image with window results \n\n # If no window centers found, just display orginal road image\n else:\n output1 = np.array(cv2.merge((binary_warped, binary_warped, binary_warped)),np.uint8)\n output2 = output1 \n return output1, output2 \n\n def draw_lanes(self, image, returnOverlayOnly = False):\n overlay = np.zeros_like(image).astype(np.uint8)\n \n (ploty, left_fitx) = self.lane_left.generate_road(10, 650);\n (ploty, right_fitx) = self.lane_right.generate_road(10, 650);\n offset_x = 1.5; # line width in px\n left_fitx += offset_x;\n right_fitx += offset_x;\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, image.shape[0]-ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, image.shape[0]-ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(overlay, np.int_([pts]), (0, 255, 0))\n\n # Combine the result with the original image\n if returnOverlayOnly:\n return overlay;\n else:\n result = cv2.addWeighted(image, 1, overlay, 0.5, 0) \n return result\n\n def draw_lanes_warped(self, cam, image):\n color_warp = self.draw_lanes(image, returnOverlayOnly=True)\n\n # Draw original points:\n for k in range(2):\n if k == 0: lane = self.lane_left;\n if k == 1: lane = self.lane_right;\n for i in range(len(lane.ally)):\n x0 = int(lane.allx[i])\n y0 = color_warp.shape[0]-int(lane.ally[i])\n cv2.circle(color_warp, (x0,y0), int(2+i*1.2), (0,0,255), -1)\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cam.unwarp_birdeye(color_warp);\n\n # Combine the result with the original image\n result = cv2.addWeighted(image, 1, newwarp, 0.5, 0)\n\n # Print text\n r1 = self.lane_left.get_radius()\n r2 = self.lane_right.get_radius()\n d1 = self.lane_left.get_vehicle_offset()\n d2 = self.lane_right.get_vehicle_offset()\n p1 = self.lane_left.last_plausible;\n p2 = self.lane_right.last_plausible;\n r = np.mean([r1,r2]);\n d = np.sum([d1,d2]);\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(result, 'Curve radius: %.0fm' % (r), (20,50), font, 1.5, (255,255,255), 3)\n cv2.putText(result, 'Vehicle is %.3fm off center' % (d), (20,100), font, 1.5, (255,255,255), 3)\n cv2.putText(result, '%d|%d' % (p1,p2), (20,150), font, 1.5, 
(255,255,255), 3)\n return result\n\n def mask_image(self, img):\n img = img.copy();\n mask_right = np.array([(850,img.shape[0]),(img.shape[1],0),(img.shape[1],img.shape[0])])\n mask_left = np.array([(150,img.shape[0]),(0,0),(0,img.shape[0])])\n img = cv2.fillConvexPoly(img, mask_right, 0)\n img = cv2.fillConvexPoly(img, mask_left, 0)\n return img\n\n def pipeline(self, cam, imgfilter, img, includeDebugImages = True):\n\n # 1. undisdort\n img = img.copy();\n img_undist = cam.undistort_img(img);\n\n # 2. calculate gradients\n _, img_gradient = imgfilter.gradient_filter(img_undist)\n img_gradient = np.array(np.dstack((img_gradient, img_gradient, img_gradient)), np.uint8)\n\n # 3. warp to birdeye view\n img_gradient_warp = cam.warp_birdeye(img_gradient)\n img_warp = cam.warp_birdeye(img_undist)\n\n # 3b. mask image\n img_gradient_warp = self.mask_image(img_gradient_warp);\n\n # 4. find centroids\n window_centroids = self.find_window_centroids(img_gradient_warp[:,:,0]);\n window_centroids = np.array(window_centroids);\n self.previous_centroids = window_centroids;\n\n # 5. fit lanes\n outlier_l= self.lane_left.fit_next(window_centroids[:,0], window_centroids[:,1], window_centroids[:,3]);\n outlier_r = self.lane_right.fit_next(window_centroids[:,0], window_centroids[:,2], window_centroids[:,4]);\n\n # Draw overlay images\n if includeDebugImages:\n img_gradient_overlay, img_overlay = self.draw_overlay_image(img_gradient_warp[:,:,0], img_warp, window_centroids, outlier_l, outlier_r)\n img_overlay_fit = self.draw_lanes(img_warp);\n img_warp_with_lanes = self.draw_lanes_warped(cam, img_undist);\n\n return {'gradient': img_gradient_overlay,\n 'image': img_overlay,\n 'gradient_warp': img_gradient_warp[:,:,0],\n 'image_warp': img_warp,\n 'image_fit': img_overlay_fit,\n 'final': img_warp_with_lanes}\n else:\n img_warp_with_lanes = self.draw_lanes_warped(cam, img_undist);\n return {'final': img_warp_with_lanes}\n","sub_path":"lib/lane_detector.py","file_name":"lane_detector.py","file_ext":"py","file_size_in_byte":10371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
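A rough usage sketch for the pipeline above; cam and imgfilter stand in for the project's camera-calibration and gradient-filter objects, which are not shown here, and the file paths are placeholders.

# Hypothetical wiring; cam and imgfilter come from the project's other modules.
import cv2

detector = LaneDetector()
frame = cv2.imread("test_images/frame0.jpg")  # placeholder input frame
results = detector.pipeline(cam, imgfilter, frame, includeDebugImages=False)
cv2.imwrite("output/frame0_lanes.jpg", results["final"])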
+{"seq_id":"522969297","text":"import os\nimport re\nimport hashlib\nimport psycopg2\nimport psycopg2.extras\nimport urlparse\nimport datetime\nfrom flask import g\nfrom . import utils\n\nOBJECT_ID_CHARS = (\n '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',\n 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z' )\n\nNUM_OBJECT_ID_CHARS = len(OBJECT_ID_CHARS)\n\ndef object_id(string, length=8):\n object_hash = int(hashlib.sha512(string).hexdigest(), 16)\n code = object_hash % pow(NUM_OBJECT_ID_CHARS, length)\n object_id = ''\n\n # Build object_id char by char, least significant to most significant digit\n for unused in range(length):\n code_char_index = code % NUM_OBJECT_ID_CHARS\n object_id = OBJECT_ID_CHARS[code_char_index] + object_id\n code /= NUM_OBJECT_ID_CHARS\n\n return object_id\n\n\n\n\n\"\"\" Only use this if you really need it. Use cursor() and commit() instead. \"\"\"\ndef get_connection():\n conn = getattr(g, '_connection', None)\n if conn is None:\n urlparse.uses_netloc.append(\"postgres\")\n url = urlparse.urlparse(os.environ[\"DATABASE_URL\"])\n conn = g._connection = psycopg2.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n return conn\n\ndef cursor():\n return get_connection().cursor(cursor_factory=psycopg2.extras.DictCursor)\n\ndef commit():\n return get_connection().commit()\n\ndef rollback():\n return get_connection().rollback()\n\ndef close():\n conn = getattr(g, '_connection', None)\n if conn is not None:\n conn.close()\n delattr(g, '_connection')\n return True\n\n\n\n# -------------------------------------------------------\n# actual data helper functions\n#\n\n\"\"\" Pass in a cursor and a user id, and get back a dictionary of the\nuser in the database, raw. Extract values to make a proper domain namedtuple\ndata object. \"\"\"\ndef user_by_id(cursor, user_id):\n if user_id is None:\n return None\n cursor.execute(\"\"\"\n SELECT * FROM users WHERE user_id = %s\n \"\"\", (user_id,))\n if cursor.rowcount == 1:\n return cursor.fetchone()\n return None\n\n\n\"\"\" Pass in a cursor and a schedule id, and get back a dictionary of the\nschedule in the database, raw. Extract values to make a proper domain\nnamedtuple data object. \"\"\"\ndef schedule_by_id(cursor, schedule_id):\n if schedule_id is None:\n return None\n cursor.execute(\"\"\"\n SELECT * FROM schedule WHERE schedule_id = %s\n \"\"\", (schedule_id,))\n if cursor.rowcount == 1:\n return cursor.fetchone()\n return None\n\n\n\"\"\" Pass in a cursor and a user object id, and get back a dictionary of the\nuser in the database, raw. Extract values to make a proper domain namedtuple\ndata object. \"\"\"\ndef user(cursor, user_object_id):\n if user_object_id is None:\n return None\n cursor.execute(\"\"\"\n SELECT * FROM users WHERE object_id = %s\n \"\"\", (user_object_id,))\n if cursor.rowcount == 1:\n return cursor.fetchone()\n return None\n\n\n\"\"\" Pass in a cursor and a schedule object id, and get back a dictionary of the\nschedule in the database, raw. Extract values to make a proper domain\nnamedtuple data object. 
\"\"\"\ndef schedule(cursor, schedule_object_id):\n if schedule_object_id is None:\n return None\n cursor.execute(\"\"\"\n SELECT * FROM schedule WHERE object_id = %s\n \"\"\", (schedule_object_id,))\n if cursor.rowcount == 1:\n return cursor.fetchone()\n return None\n\n\n\"\"\" Pass in a schedule object_id and get back the date of the first shift \nwith a given state. None if there is none. \"\"\"\ndef schedule_first_date(cursor, schedule_object_id, shift_state):\n if schedule_object_id is None:\n return None\n cursor.execute(\"\"\"\n SELECT sh.start_timestamp\n FROM shift sh\n INNER JOIN schedule s ON s.schedule_id = sh.schedule_id\n WHERE s.object_id = %s\n AND sh.state = %s\n ORDER BY sh.start_timestamp ASC\n LIMIT 1\n \"\"\", (schedule_object_id, shift_state))\n if cursor.rowcount > 0:\n return cursor.fetchone()[0]\n return None\n\n\n\"\"\" Pass in a schedule object_id and get back the date of the first shift \nwith a given state. None if there is none. \"\"\"\ndef schedule_last_date(cursor, schedule_object_id, shift_state):\n if schedule_object_id is None:\n return None\n cursor.execute(\"\"\"\n SELECT sh.end_timestamp\n FROM shift sh\n INNER JOIN schedule s ON s.schedule_id = sh.schedule_id\n WHERE s.object_id = %s\n AND sh.state = %s\n ORDER BY sh.end_timestamp DESC\n LIMIT 1\n \"\"\", (schedule_object_id, shift_state))\n if cursor.rowcount > 0:\n return cursor.fetchone()[0]\n return None\n\n\n\"\"\" Pass in a cursor and schedule_object_id and receive back a list of\nrows that can be used in the constructor for OrderedScheduleUser objects. \nTODO: Note the horrible hardcoded constants... :( \"\"\"\ndef ordered_schedule_participants(cursor, schedule_object_id):\n if schedule_object_id is None:\n return None\n cursor.execute(\"\"\"\n SELECT u.name, u.email, u.object_id,\n u.state, usr.nickname, orders.user_order, usr.state as usr_state,\n usr.schedgen_eligible as usr_schedgen_elibigle\n FROM users u\n INNER JOIN user_schedule_role usr ON usr.user_id=u.user_id\n INNER JOIN schedule s ON s.schedule_id=usr.schedule_id\n INNER JOIN role r ON r.role_id=usr.role_id\n INNER JOIN ( \n SELECT usr.user_id, row_number() OVER() - 1 AS user_order\n FROM user_schedule_role usr\n INNER JOIN schedule s USING (schedule_id)\n INNER JOIN role r USING (role_id)\n WHERE s.object_id = %(soid)s\n AND r.name = %(rname)s\n ORDER BY usr.created_timestamp ASC ) orders\n ON usr.user_id=orders.user_id\n WHERE s.object_id = %(soid)s\n AND usr.state IN (%(state1)s, %(state2)s)\n AND r.name = %(rname)s\n \"\"\", { 'soid': schedule_object_id,\n 'rname': 'participant', # ROLE_PARTICIPANT\n 'state1': 'active', # USR_STATE_ACTIVE\n 'state2': 'nominated' }) # USR_STATE_NOMINATED\n return cursor.fetchall()\n\n\n\"\"\" Pass in a cursor and stuff and True means the note was inserted, false\notherwise. This method will not commit the insert, or rollback on failure, so\nyou're responsible for that. 
\"\"\"\ndef create_note(cursor, state, note_type, code, data=None, to_user_id=None, \n to_schedule_id=None, from_user_id=None, from_schedule_id=None):\n note_object_id = object_id(\"notification\" + str(to_user_id) +\n str(to_schedule_id) +\n datetime.datetime.utcnow().isoformat(), 15)\n cursor.execute(\"\"\"\n INSERT INTO notification (object_id, state, created_timestamp,\n type, code, data, to_user_id, to_schedule_id,\n from_user_id, from_schedule_id)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\", (note_object_id, state, datetime.datetime.utcnow().isoformat(), \n note_type, code, data, to_user_id, to_schedule_id, from_user_id,\n from_schedule_id))\n return cursor.rowcount == 1\n\n\n\"\"\" Pass in a cursor, and the notification object id you want resolved. \nCommit/rollback is up to the caller. Returns false on error, true otherwise. \"\"\"\ndef resolve_note(cursor, notification_object_id):\n cursor.execute(\"\"\"\n UPDATE notification \n SET state = 'resolved', resolved_timestamp = now()\n WHERE object_id = %s\n \"\"\", (notification_object_id,))\n return cursor.rowcount == 1\n\n\"\"\" Pass in a notification and get back a dictionary of key=value pairs\nembedded in it. They should be packed like HTTP query string arguments. \"\"\"\ndef unpack_note_data(data):\n response = {}\n if data is None or data == '':\n return response\n pairs = data.split('&')\n for pair in pairs:\n try:\n key, value = pair.split('=')\n except ValueError as e:\n continue\n response[key] = value\n return response\n\n\"\"\" Use keyword arguments to pass in key=value pairs and receive back\na string to store in notification.data. \"\"\"\ndef pack_note_data(**kwargs):\n pairs = []\n for key, value in kwargs.iteritems():\n pairs.append('%s=%s' % (key, value))\n return '&'.join(pairs)\n\n\n\"\"\" Pass in a list of domain.ShiftTemplate objects, and the start and end\nweekday integers and hour integers for a candidate new ShiftTemplate. Returns\nTrue if it's ok, and False if it overlaps with any of the other ShiftTemplates.\nWeekdays are 0=Sun..6=Sat. \"\"\"\ndef shift_template_fits(shift_templates, \n start_weekday, start_hour,\n end_weekday, end_hour):\n # Fuss with the start and end weekdays so they're comparable. Hard because\n # they wrap around to zero after seven days. So put them on the same\n # linear, non-wrapped-around scale. Then create numerical indices from the\n # weekday and time so that we have one number to compare overlaps for. To\n # make sure that the candidate shift doesn't hit an overlap, try it twice,\n # once at low numbers and then at +7. 
This will cover all the cases where a\n    # shift wraps the week.\n    candidate_start_weekday = int(start_weekday)\n    candidate_end_weekday = int(end_weekday)\n\n    candidate_start_index = int('%02d%02d' % (candidate_start_weekday, start_hour))\n    candidate_end_index = int('%02d%02d' % (candidate_end_weekday, end_hour))\n    candidate_start_index_2 = int('%02d%02d' % (candidate_start_weekday + 7,\n                                                start_hour))\n    candidate_end_index_2 = int('%02d%02d' % (candidate_end_weekday + 7,\n                                              end_hour))\n\n    for shift in shift_templates:\n        shift_start_weekday = int(shift.start_weekday)\n        shift_end_weekday = int(shift.end_weekday)\n\n        start_index = int('%02d%02d' % (shift_start_weekday, shift.start_hour))\n        end_index = int('%02d%02d' % (shift_end_weekday, shift.end_hour))\n        start_index_2 = int('%02d%02d' % (shift_start_weekday + 7,\n                                          shift.start_hour))\n        end_index_2 = int('%02d%02d' % (shift_end_weekday + 7,\n                                        shift.end_hour))\n\n        if start_index <= candidate_start_index and \\\n           end_index > candidate_start_index:\n            return False\n        if start_index > candidate_start_index and \\\n           start_index < candidate_end_index:\n            return False\n        if start_index <= candidate_start_index_2 and \\\n           end_index > candidate_start_index_2:\n            return False\n        if start_index > candidate_start_index_2 and \\\n           start_index < candidate_end_index_2:\n            return False\n        if start_index_2 <= candidate_start_index and \\\n           end_index_2 > candidate_start_index:\n            return False\n        if start_index_2 > candidate_start_index and \\\n           start_index_2 < candidate_end_index:\n            return False\n    return True\n\n\n# -------------------------------------------------------\n# utilities\n#\n\n\n\"\"\" Checks if an entered nickname is actually an email. This is not RFC2822\ncompliant. It exists just to discriminate schedule owner's nicknames from\nemail addresses. If he types in something@something.something, we take\nthat as an indication of intent to use an email. \"\"\"\ndef is_email(string):\n    if re.match(r\"[^@]+@[^@]+\\.[^@]+\", string):\n        return True\n    return False\n\n\"\"\" For the domain object domain.PublicUser, the masked_email field\nshould be filled with the return value from this function. Pass in the\nraw email field from the database and this will return a version of it\nthat can be sent to non-administrators. \"\"\"\ndef mask_email(email=''):\n    masked_email = re.sub(r'@.*', '@...', email)\n    return masked_email\n\n\n\"\"\" datetime module objects cannot be serialized in json queries, so\nwe need helper methods to perform translation.\n\nThis works with a naming convention. Pass in a namedtuple, and receive back a\nnamedtuple with the same attributes and values, except for translation for times. Any\nattribute of type `datetime.time`, `datetime.date`, or `datetime.datetime`\nwill have its value replaced with the value's `isoformat()`. \"\"\"\n\ndef stringify_datetime(domaintuple):\n    domaindict = domaintuple._asdict()\n    for attr in domaindict:\n        if isinstance(domaindict[attr], (datetime.time, datetime.date, datetime.datetime)):\n            domaindict[attr] = domaindict[attr].isoformat()\n    return domaintuple._make(domaindict.values())\n\n\"\"\" stringifying lists of namedtuples. See stringify_datetime for details.\n\nPass in a list of domain tuples and receive back a list of domain tuples\nwith sane datetime representations for the wire. \"\"\"\ndef stringify_datetimes(tuplelist):\n    retval = []\n    for domaintuple in tuplelist:\n        retval.append(stringify_datetime(domaintuple))\n    return retval\n\n\n\"\"\" The weekdays used by the shift_template object are not quite the same as\nisoweekday. They start Sunday=0, Monday=1, but the following Sunday=7. This\nway a full week can be specified, such as (Sunday[0] 8am -> Sunday[7] 8am).\n\nThat means the database and API should use integers {0-7} to represent\nthe weekday field. And we need to provide a way to translate on the client,\nas well as the server. \"\"\"\n# TODO: i18n\n_weekdays = {\n    0: 'Sunday',\n    1: 'Monday',\n    2: 'Tuesday',\n    3: 'Wednesday',\n    4: 'Thursday',\n    5: 'Friday',\n    6: 'Saturday',\n    7: 'Sunday',\n}\ndef stringify_weekday(domaintuple):\n    domaindict = domaintuple._asdict()\n    for attr in domaindict:\n        if 'weekday' in attr and domaindict[attr] in _weekdays:\n            # _weekdays is a dict, so index it rather than calling it\n            domaindict[attr] = _weekdays[domaindict[attr]]\n    return domaintuple._make(domaindict.values())\n\n\nif __name__ == '__main__':\n    if object_id('steve') != '3YZLTG7C':\n        print(\"Failed, object_id('steve') should be '3YZLTG7C', not '%s'\" %\n              object_id('steve'))\n    if object_id('steve', 12) != 'HD313YZLTG7C':\n        print(\"Failed, object_id('steve', 12) should be 'HD313YZLTG7C', \" +\n              \"not '%s'\" % object_id('steve', 12))\n","sub_path":"ocr/domainutils.py","file_name":"domainutils.py","file_ext":"py","file_size_in_byte":14441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"556425955","text":"from service import build_iip\n\n\ndef main():\n mip = build_iip()\n sent_1 = \"qualities attributed to the drug. It is a catch 22 for any trainer or owner.\"\n\n for token in mip(sent_1):\n print(token.sentiment)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/sent_analysis.py","file_name":"sent_analysis.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"2065466","text":"\nfrom flask import *\nfrom PIL import Image\nimport os\nimport glob\nimport random\nimport psycopg2\nimport config_param as config\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef main():\n\n return render_template(\"index.html\", lists1=config.name_list, lists2=config.num_pics_list)\n\n\n@app.route('/index', methods=[\"POST\"])\ndef index():\n\n name = request.form.get(\"name\")\n\n num_pics = int(request.form.get(\"num_pics\"))\n\n for file in glob.glob('./static/pics/*.jpg'):\n os.remove(file)\n\n selected_name_list = [\"selected\" if i == config.name_list.index(name) else None for i in range(len(config.name_list))]\n \n selected_pics_list = [\"selected\" if i == config.num_pics_list.index(num_pics) else None for i in range(len(config.num_pics_list))]\n\n file_list = []\n\n id_list = [random.randint(0, 40) for i in range(num_pics)]\n\n \n for id_ in id_list:\n \n query = \"SELECT * FROM izonetable WHERE member = %s AND id = %s\"\n\n filename = \"./static/pics/pic_{}.jpg\".format(id_)\n\n DATABASE_URL = \"postgres://youylcnkjyfyfy:20a5d945df5a9da524c823962294428191105ef78735d82ff42d3ba216642a5b@ec2-50-16-198-4.compute-1.amazonaws.com:5432/d8o6aq59fi4v03\"\n # connect = psycopg2.connect(DATABASE_URL, sslmode='require')\n connect = psycopg2.connect(\n database = 'izonedb',\n user ='makotonakai',\n password = 'postgresql',\n host = '192.168.50.30',\n port = 5432\n )\n cursor = connect.cursor() \n\n try:\n\n cursor.execute(query, (name, id_))\n\n fetch = cursor.fetchone()\n \n image = fetch[2]\n\n with open(filename, 'wb') as f:\n f.write(bytes(image))\n\n file_list.append(filename)\n\n except Exception as e:\n print(e)\n\n finally:\n cursor.close()\n connect.close()\n\n return render_template(\"result.html\", lists1=zip(config.name_list, selected_name_list), lists2=zip(config.num_pics_list, selected_pics_list), file_list=file_list)\n\n\nif __name__ == \"__main__\":\n\n app.run(host=\"0.0.0.0\", debug=True, threaded=True)\n\n\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"377123073","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom collections import defaultdict\nfrom typing import Dict, List\n\nimport numpy as np\nfrom pytext.common.constants import Stage\nfrom pytext.data.tensorizers import Tensorizer\nfrom pytext.metric_reporters.channel import ConsoleChannel\n\n# These classes have been migrated to the open source directories. Imported\n# here for compatibility purposes.\nfrom pytext.metric_reporters.seq2seq_compositional import ( # noqa\n CompositionalSeq2SeqFileChannel,\n Seq2SeqCompositionalMetricReporter,\n)\nfrom pytext.metric_reporters.seq2seq_metric_reporter import ( # noqa\n Seq2SeqFileChannel,\n Seq2SeqMetricReporter,\n)\nfrom pytext.metric_reporters.seq2seq_utils import stringify\nfrom pytext.metrics import safe_division\nfrom pytext.metrics.mask_metrics import (\n compute_length_metrics,\n)\nfrom pytext.metrics.seq2seq_metrics import MaskedSeq2SeqTopKMetrics\n\n\nclass MaskedSeq2SeqTopKMetricReporter(Seq2SeqMetricReporter):\n class Config(Seq2SeqMetricReporter.Config):\n model_select_metric_key: str = \"all_loss\"\n select_length_beam: int = 0\n log_gradient: bool = True\n TEMP_DUMP_PREDICTIONS: bool = True\n log_samplewise_losses: bool = True\n print_length_metrics: bool = True\n\n def __init__(\n self,\n channels,\n log_gradient,\n tensorizers,\n model_select_metric_key,\n select_length_beam,\n print_length_metrics,\n ):\n super().__init__(channels, log_gradient, tensorizers)\n self.model_select_metric_key = model_select_metric_key\n if model_select_metric_key == \"em\":\n self.lower_is_better = False\n else:\n self.lower_is_better = True\n self.select_length_beam = select_length_beam\n self.print_length_metrics = print_length_metrics\n\n @classmethod\n def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):\n channels = [ConsoleChannel()]\n if config.TEMP_DUMP_PREDICTIONS:\n channels.append(\n Seq2SeqFileChannel([Stage.TEST], config.output_path, tensorizers),\n )\n return cls(\n channels,\n config.log_gradient,\n tensorizers,\n config.model_select_metric_key,\n config.select_length_beam,\n config.print_length_metrics,\n )\n\n def _reset(self):\n super()._reset()\n self.all_target_length_preds: List = []\n self.all_beam_preds: List[List[str]] = []\n self.all_loss: Dict[str, List] = defaultdict(list)\n self.all_target_lens: List = []\n self.all_target_trees: List = []\n\n def add_batch_stats(\n self, n_batches, preds, targets, scores, loss, m_input, **context\n ):\n super().add_batch_stats(\n n_batches, preds, targets, scores, None, m_input, **context\n )\n self.all_loss[\"all_loss\"].append(float(loss[0]))\n\n custom_losses = loss[1].keys()\n for loss_name in custom_losses:\n vals = self.all_loss[loss_name]\n # samplewise losses are stored as multi-element tensors, so need to separate cases\n if \"samplewise\" in loss_name:\n vals.append(loss[1][loss_name].data.cpu().numpy())\n else:\n vals.append(float(loss[1][loss_name]))\n\n def calculate_loss(self):\n \"\"\"\n Calculate the average loss for all aggregated batch\n \"\"\"\n loss_agg = {}\n for loss_name in self.all_loss.keys():\n if \"samplewise\" in loss_name:\n self.all_context.setdefault(\"losses\", {})[loss_name] = np.concatenate(\n self.all_loss[loss_name], axis=None\n )\n else:\n loss_agg[loss_name] = np.average(\n self.all_loss[loss_name], weights=self.batch_size\n )\n\n return loss_agg\n\n def calculate_metric(self):\n total_exact_match = 0\n pred_exact_match = 0\n num_samples = 
len(self.all_target_trees)\n for (beam_pred, target) in zip(self.all_beam_preds, self.all_target_trees):\n for (index, pred) in enumerate(beam_pred):\n if self._compare_target_prediction_tokens(pred, target):\n total_exact_match += 1\n if index == 0:\n pred_exact_match += 1\n break\n exact_match = round(safe_division(pred_exact_match, num_samples) * 100.0, 2)\n exact_match_top_k = round(\n safe_division(total_exact_match, num_samples) * 100.0, 2\n )\n k = 0 if len(self.all_preds) == 0 else len(self.all_beam_preds[0])\n length_metrics, length_reports = compute_length_metrics(\n self.all_target_lens, self.all_target_length_preds, self.select_length_beam\n )\n return MaskedSeq2SeqTopKMetrics(\n loss=self.calculate_loss(),\n exact_match=exact_match,\n f1=-1,\n bleu=-1,\n k=k,\n exact_match_top_k=exact_match_top_k,\n f1_top_k=-1,\n bleu_top_k=-1,\n length_metrics=length_metrics,\n length_reports=length_reports,\n )\n\n def aggregate_targets(self, new_batch, context=None):\n if new_batch is None:\n return\n target_vocab = self.tensorizers[\"trg_seq_tokens\"].vocab\n target_pad_token = target_vocab.get_pad_index()\n target_bos_token = target_vocab.get_bos_index()\n target_eos_token = target_vocab.get_eos_index()\n\n cleaned_targets = [\n self._remove_tokens(\n target, [target_pad_token, target_eos_token, target_bos_token]\n )\n for target in self._make_simple_list(new_batch[0])\n ]\n\n self.aggregate_data(self.all_targets, cleaned_targets)\n self.aggregate_data(self.all_target_lens, new_batch[1])\n\n target_res = [stringify(target, target_vocab) for target in cleaned_targets]\n\n self.aggregate_data(self.all_target_trees, target_res)\n\n def aggregate_preds(self, new_batch, context=None):\n if new_batch is None:\n return\n tree_preds = new_batch[0] # bsz X beam_size X seq_len\n length_preds = new_batch[1]\n target_vocab = self.tensorizers[\"trg_seq_tokens\"].vocab\n target_pad_token = target_vocab.get_pad_index()\n target_bos_token = target_vocab.get_bos_index()\n target_eos_token = target_vocab.get_eos_index()\n cleaned_preds = [\n self._remove_tokens(\n pred, [target_pad_token, target_eos_token, target_bos_token]\n )\n for pred in self._make_simple_list(tree_preds)\n ]\n self.aggregate_data(self.all_preds, cleaned_preds)\n\n beam_pred_res = [\n [stringify(pred, target_vocab) for pred in beam] for beam in cleaned_preds\n ]\n\n self.aggregate_data(self.all_target_length_preds, length_preds)\n self.aggregate_data(self.all_beam_preds, beam_pred_res)\n\n def get_model_select_metric(self, metrics):\n if self.model_select_metric_key == \"em\":\n return metrics.exact_match\n else:\n return metrics.loss[self.model_select_metric_key]\n","sub_path":"pytext/metric_reporters/mask_seq2seq_topk.py","file_name":"mask_seq2seq_topk.py","file_ext":"py","file_size_in_byte":7293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"592025768","text":"from . import Model\n\n\ndef hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n \"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated\n background uncertainty.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['singlechannel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['mu', 'uncorr_bkguncrt']\n >>> model.expected_data(model.config.suggested_init())\n array([ 62. , 63. , 277.77777778, 55.18367347])\n\n Args:\n signal_data (:obj:`list`): The data in the signal sample\n bkg_data (:obj:`list`): The data in the background sample\n bkg_uncerts (:obj:`list`): The statistical uncertainty on the background sample counts\n batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema\n\n \"\"\"\n spec = {\n 'channels': [\n {\n 'name': 'singlechannel',\n 'samples': [\n {\n 'name': 'signal',\n 'data': signal_data,\n 'modifiers': [\n {'name': 'mu', 'type': 'normfactor', 'data': None}\n ],\n },\n {\n 'name': 'background',\n 'data': bkg_data,\n 'modifiers': [\n {\n 'name': 'uncorr_bkguncrt',\n 'type': 'shapesys',\n 'data': bkg_uncerts,\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n","sub_path":"src/pyhf/simplemodels.py","file_name":"simplemodels.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"278793133","text":"from django.contrib import admin\nfrom django.contrib.admin import AdminSite\n\nfrom .models import About, Contact, Enquiry\n\n\nclass AboutAdmin(admin.ModelAdmin):\n list_display = ('title', 'position')\n search_fields = ['title', 'description']\n\n\n\n\n\nadmin.site.register(About, AboutAdmin)\nadmin.site.register(Contact)\nadmin.site.register(Enquiry)\n\n\n\nAdminSite.site_header = 'Change Makers Site Administration'\nAdminSite.site_title = 'Change Makers Admin'\nAdminSite.index_title = 'Change Makers Site Admin Dashboard'\n","sub_path":"cms/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"395090462","text":"# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dpctl\nimport pytest\n\nimport numba_dppy as dppy\nfrom numba_dppy.tests._helper import skip_test\n\nlist_of_filter_strs = [\n \"opencl:gpu:0\",\n \"level_zero:gpu:0\",\n \"opencl:cpu:0\",\n]\n\n\n@pytest.fixture(params=list_of_filter_strs)\ndef filter_str(request):\n return request.param\n\n\ndef test_dpctl_api(filter_str):\n if skip_test(filter_str):\n pytest.skip()\n\n device = dpctl.SyclDevice(filter_str)\n with dppy.offload_to_sycl_device(device):\n dpctl.lsplatform()\n dpctl.get_current_queue()\n dpctl.get_num_activated_queues()\n dpctl.is_in_device_context()\n","sub_path":"numba_dppy/tests/test_dpctl_api.py","file_name":"test_dpctl_api.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"322263519","text":"# Create function with name outer(name). This function should return inner function with name inner.\n# This inner function prints message Hello, !\n# For example\n# tom = outer(\"tom\")\n# tom() -> Hello, tom!\n\ndef outer(name):\n def inner():\n print(f\"Hello, {name}!\")\n return inner\n\n\nif ('outer' in locals()):\n print('function \"outer\" is present')\nelse:\n print('function \"outer\" is absent')\n\nouter(\"Tom\")()\n\nalice = outer(\"Alice\")\nalice()","sub_path":"3_sprint/Tasks/s3.1.py","file_name":"s3.1.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"377327524","text":"#!/usr/bin/env python\nimport math\nimport itertools\n\nf = open('input', 'r')\ndata = f.readline().strip()\nf.close()\n\nvalue = int(data)\n\n#value = 1024\n\ndef gen_tier(tier):\n # tier is zero based\n # order: bottom, left, top, right\n if tier == 0:\n return [list(range(1,2))] * 4\n\n max_sqrt = int((tier * 2) + 1)\n max = max_sqrt ** 2\n\n return (list(range(max-(max_sqrt-1), max+1)),\n list(range(max-2*(max_sqrt-1), max-(max_sqrt-1)+1)),\n list(range(max-3*(max_sqrt-1), max-2*(max_sqrt-1)+1)))\n\n\n# find the max value (lower right hand corner) must be odd\nmax_rt = int(math.ceil(math.sqrt(value)))\nif max_rt % 2 == 0:\n max_rt += 1\nmax = max_rt ** 2\ntier_n = (max_rt - 1) / 2\ntier = gen_tier(tier_n)\n\nfor side in tier:\n if value in side:\n dist_from_middle = abs(value - side[int(len(side)/2)])\n break\n\nprint(dist_from_middle)\nprint(dist_from_middle + tier_n)\n\n\ndef spiral_walk():\n pos = (0,0)\n yield pos\n \n steps_taken = 1\n tier = 1\n while True:\n x = tier * 2\n for move in [(1,0)] + [(0,1)]*(x-1) + [(-1,0)]*x + [(0,-1)]*x + [(1,0)]*x:\n pos = tuple([sum(els) for els in zip(pos, move)])\n yield pos\n steps_taken += 1\n\n tier += 1\n\ndef sum_neighbors(arr, x, y):\n sum_val = 0\n # includes (0,0) which is ok for this problem\n for move in itertools.product([-1,0,1],[-1,0,1]):\n getx, gety = tuple([sum(els) for els in zip((x,y), move)])\n sum_val += arr[getx][gety]\n return sum_val\n\n\ngen = spiral_walk()\nval = 0\nwhile val != value:\n pos = next(gen)\n val += 1\n\ndist = sum(map(abs, pos))\nprint('Part 1: manhatten dist %d' % dist)\n\ngrid = [[0 for x in range(max_rt)] for y in range(max_rt)]\noffset = int((max_rt - 1) / 2)\n\n# treat the first square specially\ngen = spiral_walk()\nnext(gen)\ngrid[offset][offset] = 1\n\nfor x,y in gen:\n sum_val = sum_neighbors(grid, x+offset, y+offset)\n grid[x+offset][y+offset] = sum_val\n\n if sum_val > value:\n break\n\nprint('Part 2: first value greater %d at %d,%d' % (sum_val, x+offset, y+offset))\n\n\n","sub_path":"2017/03/puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"484580444","text":"from collections import OrderedDict\nimport struct\nimport datetime\n\nclass LogDecoder:\n def __init__(self, ini_file, slave_number=1):\n self.slave_number = slave_number\n self.ini_dict = dict()\n self.open_ini_file(ini_file)\n\n def decode_ini_line(self, line):\n if len(line) > 0 and line.startswith(\"AI\"):\n number = line.split(',')[0].split('=')[1].strip()\n self.ini_dict[\"%i:anin:%s\"%(self.slave_number, number)] = \"float\"\n elif len(line) > 0 and line.startswith(\"AO\"):\n number = line.split(',')[0].split('=')[1].strip()\n self.ini_dict[\"%i:anout:%s\"%(self.slave_number, number)] = \"float\"\n elif len(line) > 0 and line.startswith(\"EVN\"):\n number = line.split(',')[0].split('=')[1].strip()\n self.ini_dict[\"%i:alarm:%s\" % (self.slave_number, number)] = \"bool\"\n elif len(line) > 0 and line.startswith(\"DI\"):\n number = line.split(',')[0].split('=')[1].strip()\n self.ini_dict[\"%i:digin:%s\" % (self.slave_number, number)] = \"bool\"\n elif len(line) > 0 and line.startswith(\"DO\"):\n number = line.split(',')[0].split('=')[1].strip()\n self.ini_dict[\"%i:digout:%s\" % (self.slave_number, number)] = \"bool\"\n elif len(line) > 0 and line.startswith(\"STS\"):\n number = line.split(',')[1].strip()\n self.ini_dict[\"%i:status:%s\" % (self.slave_number, number)] = \"bool\"\n elif len(line) > 0 and line.startswith(\"SETUP\"):\n number = line.split(';')[4].strip()\n setup_type = line.split(';')[2].strip()\n if setup_type.count(\"T_REAL\") > 0:\n self.ini_dict[\"%i:setup:%s\" % (self.slave_number, number)] = \"float\"\n elif setup_type.count(\"T_SHORT\") > 0:\n self.ini_dict[\"%i:setup:%s\" % (self.slave_number, number)] = \"short\"\n elif setup_type.count(\"T_INTEGER\") > 0:\n self.ini_dict[\"%i:setup:%s\" % (self.slave_number, number)] = \"integer\"\n elif setup_type.count(\"T_CARDINAL\") > 0:\n self.ini_dict[\"%i:setup:%s\" % (self.slave_number, number)] = \"cardinal\"\n elif setup_type.count(\"T_LONGCARD\") > 0:\n self.ini_dict[\"%i:setup:%s\" % (self.slave_number, number)] = \"longcard\"\n elif setup_type.count(\"T_BOOLEAN\") > 0:\n self.ini_dict[\"%i:setup:%s\" % (self.slave_number, number)] = \"bool\"\n\n def open_ini_file(self, ini_file):\n with open(ini_file, 'rb') as myFile:\n lines = myFile.readlines()\n for line in lines:\n self.decode_ini_line(line.decode(\"unicode_escape\"))\n\n\n def decode_log_file(self, log_file, time_offset=None):\n with open(log_file, 'rb') as myFile:\n log_dict = OrderedDict()\n log_dict['time'] = []\n content = myFile.read()\n header = None\n\n for i in range(int(len(content)/2)):\n if content[i*2:i*2+2] == b'\\x00;':\n header = content[4:i*2].decode(\"unicode_escape\").replace('\\00', '')\n data = content[i*2+2:]\n break\n if header is None:\n print(\"Error decoding file: %s. Header line not found.\" % log_file)\n return None\n\n for h in header.split(','):\n if h in self.ini_dict:\n log_dict[h] = list()\n else:\n print(\"Error decoding file: %s. 
Header %s not found.\" % (log_file, h))\n return None\n\n if time_offset is None:\n time_offset_secs = 0\n else:\n time_offset_secs = time_offset*60\n\n index_data = 0\n index_dict = 0\n items_dict = list(log_dict.items())\n while index_data < len(data):\n if items_dict[index_dict][0] == \"time\":\n log_dict[items_dict[index_dict][0]].append(datetime.datetime.fromtimestamp(struct.unpack('>Q', data[index_data:index_data+8])[0]/1000.0+time_offset_secs).strftime('%Y-%m-%d %H:%M:%S.%f'))\n index_data += 8\n elif self.ini_dict[items_dict[index_dict][0]] == \"bool\":\n log_dict[items_dict[index_dict][0]].append(data[index_data])\n index_data += 1\n elif self.ini_dict[items_dict[index_dict][0]] == \"float\":\n log_dict[items_dict[index_dict][0]].append(struct.unpack('>d', data[index_data:index_data+8])[0])\n index_data += 8\n elif self.ini_dict[items_dict[index_dict][0]] == \"short\":\n log_dict[items_dict[index_dict][0]].append(struct.unpack('>B', data[index_data:index_data+1])[0])\n index_data += 1\n elif self.ini_dict[items_dict[index_dict][0]] == \"integer\":\n log_dict[items_dict[index_dict][0]].append(struct.unpack('>h', data[index_data:index_data+2])[0])\n index_data += 2\n elif self.ini_dict[items_dict[index_dict][0]] == \"cardinal\":\n log_dict[items_dict[index_dict][0]].append(struct.unpack('>H', data[index_data:index_data+2])[0])\n index_data += 2\n elif self.ini_dict[items_dict[index_dict][0]] == \"longcard\":\n log_dict[items_dict[index_dict][0]].append(struct.unpack('>I', data[index_data:index_data+4])[0])\n index_data += 4\n index_dict = (index_dict + 1) % len(log_dict)\n return log_dict","sub_path":"scripts/LogDecoder.py","file_name":"LogDecoder.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
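A usage sketch for the decoder above; the .ini and log file names are placeholders, and time_offset is in minutes per the code.

# Hypothetical usage; file names are placeholders for a real config/log pair.
decoder = LogDecoder("plant_config.ini", slave_number=1)
log = decoder.decode_log_file("20200101.log", time_offset=60)  # offset in minutes
if log is not None:
    print(log["time"][:5])  # first five decoded timestamps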
+{"seq_id":"565919150","text":"\"\"\"lab5_11\n주제 : 분수\n작성일 : 17. 11. 05.\n작성자 : 201632023 이지훈\n\"\"\"\n\nclass fraction:\n def __init__(self,numer,denom):\n \"\"\"\n 분수 초기화\n :param numer: 분모\n :param denom: 분자\n \"\"\"\n self.numer = numer\n self.denom = denom\n\n def prnt(self):\n \"\"\"\n 출력하는 메소드\n :return: 없음\n \"\"\"\n return \"출력 : \" + str(self.denom) + \"/\" + str(self.numer)\n\n def __str__(self):\n \"\"\"\n :return: 문자열\n \"\"\"\n return \"출력 : \"+str(self.denom)+\"/\"+str(self.numer)\n\n def __add__(self,other):\n \"\"\"\n 두 분수를 더해주는 메소드\n :param other: 더할 분수\n :return: 두 분자의 합의 결과\n \"\"\"\n add = self.numer * other.denom + self.denom * other.numer\n c = self.numer * other.numer\n if(add == 0):\n return fraction(0,0)\n\n for i in range(2,c):\n if(add%i == 0 and c%i == 0):\n add = add//i\n c = c//i\n i = 2\n\n s = fraction(c,add)\n return s\n\n def __sub__(self, other):\n \"\"\"\n 두 분수를 더해주는 메소드\n :param other: 더할 분수\n :return: 두 분자의 합의 결과\n \"\"\"\n sub = self.denom * other.numer - self.numer * other.denom\n c = self.numer * other.numer\n if (sub == 0):\n return fraction(0,0)\n for i in range(2,c):\n if(sub%i == 0 and c%i == 0):\n sub = sub//i\n c = c//i\n i = 2\n s = fraction(c, sub)\n return s\n\n def __eq__(self, other):\n \"\"\"\n 두 분수가 같은지 비교해주는 메소드\n :param other: 비교할 메소드\n :return: True or False\n \"\"\"\n if(self.denom * other.numer == self.numer * other.denom):\n return True\n else:\n return False\n\n def __ne__(self, other):\n \"\"\"\n 두 분수가 같지 않은지 비교해주는 메소드\n :param other: 비교할 메소드\n :return: True or False\n \"\"\"\n if (self.denom * other.numer != self.numer * other.denom):\n return True\n else:\n return False\n\n def __lt__(self, other):\n \"\"\"\n 두 분수중 other가 큰지 비교해 주는 메소드\n :param other: 비교할 메소드\n :return: True or False\n \"\"\"\n if (self.denom * other.numer < self.numer * other.denom):\n return True\n else:\n return False\n\n def __gt__(self, other):\n \"\"\"\n 두 분수중 self가 큰지 비교해 주는 메소드\n :param other: 비교할 메소드\n :return: True or False\n \"\"\"\n if (self.denom * other.numer > self.numer * other.denom):\n return True\n else:\n return False\n\n\n\nc1 = fraction(4,2)\nc2 = fraction(7,4)\nprint(\"1번 분수\",c1)\nprint(\"2번 분수\",c2)\nprint(\"덧셈\",c1+c2)\nprint(\"뺄셈\",c1 - c2)\nprint(\"두 분수가 같은가? :\",c1 == c2)\nprint(\"뒤쪽 분수가 더 큰가? :\",c1 < c2)\nprint(\"앞쪽 분수가 더 큰가? :\",c1 > c2)\nprint(\"두 분수가 같지 않은가? :\",c1 != c2)\n","sub_path":"Python/과제/201632023_14/lab5_11.py","file_name":"lab5_11.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"618371893","text":"import sys\nfrom collections import deque\n\n\npower = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\ndeck_p1 = deque([power.index(input()[:-1]) for _ in range(int(input()))])\ndeck_p2 = deque([power.index(input()[:-1]) for _ in range(int(input()))])\n\nrounds = 0\nwhile deck_p1 and deck_p2:\n rounds += 1\n stack_p1 = [deck_p1.popleft()]\n stack_p2 = [deck_p2.popleft()]\n while stack_p1[-1] == stack_p2[-1]:\n for _ in range(4):\n try:\n stack_p1.append(deck_p1.popleft())\n stack_p2.append(deck_p2.popleft())\n except IndexError:\n print('PAT')\n sys.exit()\n winner = stack_p1[-1] < stack_p2[-1]\n [deck_p1, deck_p2][winner] += stack_p1 + stack_p2\n\nprint('12'[winner], rounds)","sub_path":"Winamax/Others/winamax-battle_Pouf_Lvl20.py","file_name":"winamax-battle_Pouf_Lvl20.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"649439111","text":"# variables\r\nanimal1 = \"cow\"\r\nanimal2 = \"snake\"\r\nanimal3 = \"cat\"\r\nanimal4 = \"kangaroo\"\r\nanimal5 = \"alpaca\"\r\nadjective1 = \"smelly\"\r\nadjective2 = \"tall\"\r\nadjective3 = \"groovy\"\r\nadjective4 = \"sloppy\"\r\nadjective5 = \"vulgar\"\r\nlocation = \"Starbucks\"\r\nfood = \"jelly beans\"\r\n\r\n# Nursery Rhyme\r\nprint(\"This \" + adjective1 + \" \" + animal1 + \" went to \" + location + \",\\n\")\r\nprint(\"This \" + adjective2 + \" \" + animal2 + \" stayed home,\\n\")\r\nprint(\"This \" + adjective3 + \" \" + animal3 + \" had \" + food + \",\\n\")\r\nprint(\"This \" + adjective4 + \" \" + animal4 + \" had none,\\n\")\r\nprint(\"This \" + adjective5 + \" \" + animal5 + \" cried wee wee wee all the way home\")\r\n\r\n#Original Rhyme:\r\n#This little piggy went to market,\r\n#This little piggy stayed home,\r\n#This little piggy had roast beef\r\n#This little piggy had none,\r\n#And this little piggy cried wee wee wee all the way home\r\n","sub_path":"Madlib.py","file_name":"Madlib.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"504909391","text":"import os\nimport subprocess\n\nfrom pybuilder.utils import assert_can_execute\nfrom pybuilder.core import use_plugin, init, Author, task\n\nuse_plugin('python.core')\nuse_plugin('python.install_dependencies')\nuse_plugin('python.distutils')\nuse_plugin('copy_resources')\nuse_plugin('filter_resources')\n\nuse_plugin('python.pycharm')\n\nuse_plugin('python.unittest')\nuse_plugin('python.coverage')\nuse_plugin('python.flake8')\nuse_plugin('python.frosted')\n\nuse_plugin('pypi:pybuilder_header_plugin')\n\n\nauthors = [Author('Maximilien Riehl', 'max@riehl.io')]\n\ndescription = \"\"\"isphere - interactive shell for vsphere\"\"\"\n\nname = 'isphere'\nlicense = 'WTFPL'\nsummary = 'interactive shell for vsphere'\nurl = 'https://github.com/mriehl/isphere'\nversion = '0.0.1'\n\ndefault_task = ['clean', 'analyze', 'publish']\n\n\n@init\ndef set_properties(project):\n project.depends_on('pyvmomi')\n project.depends_on('docopt')\n project.depends_on('cmd2')\n project.build_depends_on('mock')\n\n project.set_property('verbose', True)\n\n project.set_property('flake8_verbose_output', True)\n project.set_property('flake8_include_test_sources', True)\n # E501 line too long\n # E731 do not assign a lambda expression, use a def\n project.set_property('flake8_ignore', 'E501,E731')\n project.set_property('flake8_break_build', True)\n\n FROSTED_BARE_EXCEPT_WARNING = 'W101'\n project.set_property('frosted_ignore', [FROSTED_BARE_EXCEPT_WARNING])\n project.set_property('frosted_include_test_sources', True)\n\n project.set_property('coverage_threshold_warn', 50)\n project.set_property('coverage_break_build', False)\n project.set_property('coverage_exceptions', ['thirdparty.tasks'])\n\n project.set_property('copy_resources_target', '$dir_dist')\n project.get_property('copy_resources_glob').extend(['setup.cfg'])\n project.set_property('filter_resources_glob', ['**/cli.py'])\n\n project.set_property('dir_dist_scripts', 'scripts')\n\n project.set_property('distutils_classifiers', [\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Topic :: System :: Systems Administration'\n ])\n\n project.set_property('pybuilder_header_plugin_break_build', False) # embedded 3rd-party sources\n project.set_property('pybuilder_header_plugin_expected_header',\n ('# Copyright (c) 2014-2015 Maximilien Riehl \\n'\n '# This work is free. You can redistribute it and/or modify it under the\\n'\n '# terms of the Do What The Fuck You Want To Public License, Version 2,\\n'\n '# as published by Sam Hocevar. 
See the COPYING.wtfpl file for more details.\\n'\n '#\\n'))\n\n project.set_property('distutils_console_scripts', ['isphere.exe = isphere.cli:main'])\n\n\n@init(environments='teamcity')\ndef set_properties_for_teamcity_builds(project):\n import os\n project.set_property('teamcity_output', True)\n project.version = '%s-%s' % (project.version, os.environ.get('BUILD_NUMBER', 0))\n project.default_task = ['clean', 'install_build_dependencies', 'publish']\n project.set_property('install_dependencies_index_url', os.environ.get('PYPIPROXY_URL'))\n project.set_property('install_dependencies_use_mirrors', False)\n project.rpm_release = os.environ.get('RPM_RELEASE', 0)\n\n\n@task(\"pdoc_generate_documentation\", \"Generates HTML documentation tree with pdoc\")\ndef pdoc_generate(project, logger):\n assert_can_execute(command_and_arguments=[\"pdoc\", \"--version\"],\n prerequisite=\"pdoc\",\n caller=pdoc_generate.__name__)\n\n logger.info(\"Generating pdoc documentation\")\n command_and_arguments = [\"pdoc\", \"--html\", \"isphere\", \"--all-submodules\", \"--overwrite\", \"--html-dir\", \"api-doc\"]\n source_directory = project.get_property(\"dir_source_main_python\")\n environment = {\"PYTHONPATH\": source_directory,\n \"PATH\": os.environ[\"PATH\"]}\n\n subprocess.check_call(command_and_arguments, shell=False, env=environment)\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"106285027","text":"from armulator.armv6.opcodes.abstract_opcodes.str_immediate_thumb import StrImmediateThumb\nfrom armulator.armv6.opcodes.opcode import Opcode\nfrom armulator.armv6.bits_ops import zero_extend\n\n\nclass StrImmediateThumbT1(StrImmediateThumb, Opcode):\n def __init__(self, instruction, add, wback, index, t, n, imm32):\n Opcode.__init__(self, instruction)\n StrImmediateThumb.__init__(self, add, wback, index, t, n, imm32)\n\n def is_pc_changing_opcode(self):\n return False\n\n @staticmethod\n def from_bitarray(instr, processor):\n rt = instr[13:16]\n rn = instr[10:13]\n imm5 = instr[5:10]\n index = True\n add = True\n wback = False\n imm32 = zero_extend(imm5 + \"0b00\", 32)\n return StrImmediateThumbT1(instr, **{\"add\": add, \"wback\": wback, \"index\": index, \"t\": rt.uint, \"n\": rn.uint,\n \"imm32\": imm32})\n","sub_path":"armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_16_bit/thumb_load_store_single_data_item/str_immediate_thumb_t1.py","file_name":"str_immediate_thumb_t1.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"232799704","text":"from collections import Counter\nfrom functools import reduce\nfrom itertools import chain\n\nfrom expression.Function import Add, Multiply\nfrom expression.Value import Value\nfrom expression.simplifier.Simplifier import Simplifier\nfrom utils.expression_utils import filter_split\n\n\nclass AddNestedAddSimplifier(Simplifier):\n def can_simplify(self, expression):\n return isinstance(expression, Add) and \\\n len([expr for expr in expression.get_expressions() if isinstance(expr, Add)]) != 0\n\n def _simplify(self, expression):\n adds, other = filter_split(lambda x: isinstance(x, Add), expression.get_expressions())\n adds = list(chain(*map(lambda x: x.get_expressions(), adds)))\n\n return Add(*adds, *other)\n\n\nclass AddCombineValuesSimplifier(Simplifier):\n def can_simplify(self, expression):\n return isinstance(expression, Add) and \\\n len([expr for expr in expression.get_expressions() if isinstance(expr, Value)]) > 0\n\n def _simplify(self, expression):\n values, other = filter_split(lambda x: isinstance(x, Value), expression.get_expressions())\n value = reduce(lambda x, y: x + y, map(lambda x: x.get_numeric_value(), values))\n\n if len(other) == 0:\n return Value(value)\n if value == 0:\n if len(other) == 1:\n return other[0]\n return Add(*other)\n return Add(value, *other)\n\n\nclass AddCombineTermsSimplifier(Simplifier):\n def can_simplify(self, expression):\n return isinstance(expression, Add)\n\n def _simplify(self, expression):\n exprs = expression.get_expressions()\n counts = Counter(exprs)\n exprs = []\n for term in counts:\n freq = counts[term]\n if freq == 1:\n exprs.append(term)\n else:\n exprs.append(Value(freq) * term)\n\n if len(exprs) == 1:\n return exprs[0]\n return Add(*exprs)\n\nclass AddFactorSimplifier(Simplifier):\n def can_simplify(self, expression):\n\n return isinstance(expression, Add) and \\\n len([x for x in expression.get_expressions() if isinstance(x, Multiply)]) > 1\n\n def _simplify(self, expression):\n\n multiplies, others = filter_split(lambda x: isinstance(x, Multiply), expression.get_expressions())\n\n sets = [set(x.get_expressions()) for x in multiplies]\n c = Counter()\n for s in sets:\n c.update(s)\n term, count = c.most_common(1)[0]\n\n if count < 2:\n return expression\n\n in_terms = []\n\n for mult in multiplies:\n if term in mult.get_expressions():\n l = list(mult.get_expressions())\n l.remove(term)\n if len(l) == 1:\n in_terms.append(l[0])\n else:\n in_terms.append(Multiply(*l))\n else:\n others.append(mult)\n\n if len(others) == 0:\n return term * Add(*in_terms)\n return Add(term * Add(*in_terms), *others)\n\n\n\n\n\n\n\n","sub_path":"expression/simplifier/AddSimplifiers.py","file_name":"AddSimplifiers.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"45513731","text":"import xml.etree.ElementTree as xml\n\n\nclass XML:\n fileName: str\n\n def __init__(self, fileName):\n self.fileName = fileName + \".xml\"\n self.openFile()\n\n def openFile(self):\n try:\n file = open(self.fileName, \"r\")\n except FileNotFoundError:\n self.createFile()\n\n # def createFile(self):\n # rootXML = xml.Element(\"settings\")\n #\n # text = xml.Element(\"text\")\n # text.text = \"Text\"\n # rootXML.append(text)\n # list = xml.Element(\"list\")\n # rootXML.append(list)\n #\n # item: xml.SubElement\n #\n # item = xml.SubElement(list, \"user\")\n # item.text = \"postgres\"\n #\n # item = xml.SubElement(list, \"password\")\n # item.text = \"xthyjskm2000\"\n #\n # item = xml.SubElement(list, \"host\")\n # item.text = \"localhost\"\n #\n # item = xml.SubElement(list, \"port\")\n # item.text = \"4\"\n #\n # item = xml.SubElement(list, \"database\")\n # item.text = \"keenup\"\n #\n # list = xml.Element(\"list\")\n # rootXML.append(list)\n #\n # item: xml.SubElement\n #\n # item = xml.SubElement(list, \"cron\")\n # item.text = \"30\"\n #\n # file = open(self.fileName, \"w\")\n # file.write(xml.tostring(rootXML, encoding=\"utf-8\", method=\"xml\").decode(encoding=\"utf-8\"))\n # file.close()\n\n def parsingFile(self, elements, text=True):\n tree = xml.ElementTree(file=self.fileName)\n rootXML = tree.getroot()\n for element in rootXML.iter(elements):\n if (text):\n return element.text\n return element\n\n","sub_path":"cre.py","file_name":"cre.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"287498174","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import generics\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\n\nfrom .serializers import ProfileSerializer, RegistrationSerializer, LoginSerializer, UserEmbeddedSerializer\nfrom .models import Profile\nfrom .serializers import ProfileSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.views import APIView\n# Create your views here.\n\n\nclass ProfileView(generics.ListCreateAPIView):\n serializer_class = ProfileSerializer\n queryset = Profile.objects.all()\n\n\nclass ProfileRUD(APIView):\n serializer_class = ProfileSerializer\n queryset = Profile.objects.all()\n permission_classes = [IsAuthenticated]\n\n def get(self, request, pk=None):\n queryset = self.queryset\n user = get_object_or_404(queryset, pk=pk)\n serializer = ProfileSerializer(user, many=False)\n return Response(serializer.data)\n\n def put(self, request, pk=None):\n serializer_context = {'request': request}\n user = get_object_or_404(self.queryset, pk=pk)\n serializer_data = request.data\n serializer = self.serializer_class(\n user,\n context=serializer_context,\n data=serializer_data,\n partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def delete(self, request, pk=None):\n user = get_object_or_404(self.queryset, user__pk=pk)\n res = user.delete()\n return Response(res, status=status.HTTP_200_OK)\n\n\nclass IsRegisteredUser(APIView):\n permission_classes = (AllowAny,)\n queryset = Profile.objects.all()\n serializer_class = UserEmbeddedSerializer\n\n def post(self, request):\n email = request.data.get('email', {})\n queryset = self.queryset\n user = get_object_or_404(queryset, email=email)\n serializer = self.serializer_class(user, many=False)\n return Response(serializer.data)\n\n\nclass ProfileRegister(APIView):\n permission_classes = (AllowAny,)\n serializer_class = RegistrationSerializer\n\n def post(self, request):\n user = request.data\n\n serializer = self.serializer_class(data=user)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass ProfileLogin(APIView):\n permission_classes = (AllowAny,)\n serializer_class = LoginSerializer\n\n def post(self, request):\n user = request.data\n print(user)\n serializer = self.serializer_class(data=user)\n serializer.is_valid(raise_exception=True)\n print(serializer.data)\n return Response(serializer.data, status=status.HTTP_200_OK)\n","sub_path":"server/authentication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"196995134","text":"import os\nimport time\n\nimport pyperclip\n\n\ndef first_task(input_data):\n count = 0\n for i in range(len(input_data)):\n value = input_data[i]\n a, b = value.split(\",\")\n a_start, a_end = a.split(\"-\")\n b_start, b_end = b.split(\"-\")\n a_set = set(range(int(a_start), int(a_end) + 1))\n b_set = set(range(int(b_start), int(b_end) + 1))\n min_len = min(len(a_set), len(b_set))\n inter_len = len(a_set.intersection(b_set))\n if inter_len == min_len and inter_len != 0:\n count += 1\n return count\n\n\ndef second_task(input_data):\n count = 0\n for i in range(len(input_data)):\n value = input_data[i]\n a, b = value.split(\",\")\n a_start, a_end = a.split(\"-\")\n b_start, b_end = b.split(\"-\")\n a_set = set(range(int(a_start), int(a_end) + 1))\n b_set = set(range(int(b_start), int(b_end) + 1))\n inter_len = len(a_set.intersection(b_set))\n if inter_len != 0:\n count += 1\n return count\n\n\ndef run_day():\n input_file = os.path.join(os.path.dirname(__file__), \"input.txt\")\n input_data = list(map(lambda line: line.strip(), open(input_file, \"r\")))\n\n t_start = time.time()\n first_answer = first_task(input_data)\n t_end = time.time()\n first_time = round(t_end - t_start, 2)\n if first_answer is not None:\n pyperclip.copy(str(first_answer))\n pyperclip.paste()\n\n print(\"#############################\")\n print(\"The answer to the 1st task is\")\n print(first_answer, f\"in {first_time} seconds\")\n\n t_start = time.time()\n second_answer = second_task(input_data)\n t_end = time.time()\n second_time = round(t_end - t_start, 3)\n if second_answer is not None:\n pyperclip.copy(str(second_answer))\n pyperclip.paste()\n\n print()\n print(\"The answer to the 2nd task is\")\n print(second_answer, f\"in {second_time} seconds\")\n print(\"#############################\")\n\n\nif __name__ == \"__main__\":\n run_day()\n","sub_path":"aoc_2022/src/2022_day_4/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"50187825","text":"\n# coding: utf-8\n\n# In[1]:\n\n'''start exploring the Umich data '''\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.colors import LogNorm \n\nimport sklearn\nfrom scipy import stats\n\nget_ipython().magic('matplotlib inline')\nsns.set_style('white')\n\n\n# In[3]:\n\ndf = pd.read_table('/Users/soniat/Documents/finch/data/Umich_run206/UMich-run206_RDP.csv', sep=',')\ndf.iloc[:,7:].fillna(0)\ndf.ix[:5,6:15]\n\n\n# In[4]:\n\nsamples = df.columns.values[7:]\n\n\n# In[5]:\n\n''' map OB donors to sample names '''\ndonor2sample_ID = pd.read_table('/Users/soniat/Documents/finch/data/Umich_run206/UMich-run206_donor2sample.tab',)\n\n\n# In[6]:\n\n''' plate data hand entered from the workbook Umich sent; OBSTDs only'''\nsample2plate = dict(zip(\n ['OBSTD'+i for i in\n ('599', '557', '687',\n '572','368','418',\n '713','1171','286',\n '1061','773','652')],\n ['A', 'A', 'A','B', 'B', 'B','C', 'C', 'C','D', 'D', 'D'] ))\nsamp2plate = pd.DataFrame(data=sample2plate, index=['plate']).T\n\n\n# In[7]:\n\nobstds = [s for s in samples if s.startswith(\"OBSTD\")]\nassert(set(samp2plate.sort_values(by='plate').index.values) == set(obstds))\nobstds = samp2plate.sort_values(by='plate').index.values\n\n\n# In[11]:\n\n''' If we filter out reads that couldn't be classified down to a particular level, what fraction are lost? '''\n\nfig,axes = plt.subplots(3,2, figsize=(7,8), sharey=True)\nax=axes.flatten()\n# filter reads not classified at various levels of taxonomy (pkgofgs)\nfor i,pkgofgs in enumerate(df.columns[2:7]):\n nullfrac = df.loc[ df[pkgofgs].isnull(),obstds].sum() / df.loc[:,obstds].sum()\n #nullfrac = (df.loc[df[pkgofgs].isnull()]).sum() / df.sum()\n ax[i].hist( nullfrac )\n ax[i].set_title(pkgofgs)\n ax[i].annotate('OBSTD368',\n xy=(nullfrac['OBSTD368'],0),\n xytext = (nullfrac['OBSTD368']*1.02, 0.3),\n arrowprops=dict(facecolor='black', shrink=0.01,frac=0.2, width=2))\n \nax[4].set_xticklabels(ax[i].get_xticklabels(), rotation=45)\n\nfig.tight_layout()\nfig.text(0.5,1, 'fraction of unclassified reads per sample', ha='center', fontsize=14);\nfig.text(1,0.5, '''\nThe last plots were misleading due to unequal read number. \nOBSTD368 is no longer an outlier. 
\nThere is no consistent outlier\n''', fontsize=13);\n\n\n# In[55]:\n\n''' super quick look at poo stew vs average donor community diversity '''\ndonor_sids = list(donor2sample_ID.Sample_ID.values)\nobstds = list(obstds)\n\nfig,ax=plt.subplots(2,5, figsize=(15,4), sharex=True)\nfor j,pkgofgs in enumerate(df.columns[2:7]):\n temp = df.loc[~df[pkgofgs].isnull()]\n richness = temp.shape[0] - temp[donor_sids].isnull().sum()\n ax[0,j].hist( richness )\n ax[0,j].set_ylabel('OBSTDs')\n richness = temp.shape[0] - temp[obstds].isnull().sum()\n ax[0,j].set_title('taxa={}'.format(pkgofgs))\n ax[1,j].hist( richness )\n ax[1,j].set_ylabel('donors')\n\n\nfig.text(-0.015,0.5,'counts: richness per sample', rotation=90, fontsize=14, va='center',ha='center')\nfig.text(0.5,1, 'poo-stews significantly richer than healthy donors at all taxonomic levels', fontsize=15, va='center',ha='center');\nfig.tight_layout()\n\n\n# In[30]:\n\n\ntemp = df.loc[~df[pkgofgs].isnull()]\nrichness = temp.shape[0] - temp[donor_sids].isnull().sum()\n\n\n# In[6]:\n\n''' Jensen-shannon divergence '''\ndef jsd(x,y): \n # @author: jonathanfriedman\n import warnings\n warnings.filterwarnings(\"ignore\", category = RuntimeWarning)\n x = np.array(x)\n y = np.array(y)\n d1 = x*np.log2(2*x/(x+y))\n d2 = y*np.log2(2*y/(x+y))\n d1[np.isnan(d1)] = 0\n d2[np.isnan(d2)] = 0\n d = 0.5*np.sum(d1+d2) \n return d\n\n\n# In[287]:\n\n''' try a few filtering levels to remove poorly -classified reads'''\n\nmat_min_ord = df.loc[~df['Order'].isnull()]\nmat_min_fam = df.loc[~df['Family'].isnull()]\nmat_min_gen = df.loc[~df['Genus'].isnull()]\n\ndf.shape,mat_min_ord.shape\n\nprint('''fraction reads remaining for filter requiring classification at: \\n{:.3f} order \\n{:.3f} family \\n{:.3f} genus'''.format(\n*[ tot/sum(df.sum()) for tot in (sum(mat_min_ord.sum()), sum(mat_min_fam.sum()), sum(mat_min_gen.sum())) ] ))\n\n\n# In[8]:\n\nprint(\"Number of families:\")\nmat_min_fam[obstds].apply(lambda x: (x>0).sum())\n\n\n# In[264]:\n\n''' Calculate JSD at 2 filtering levels'''\nfrom itertools import combinations\n\nmat_min_fam_jsd = pd.DataFrame()\nmat_min_gen_jsd = pd.DataFrame()\nfor (s1,s2) in combinations(samp2plate.sort_values(by='plate').index.values,2):\n mat_min_gen_jsd.loc[s1,s2] = jsd(mat_min_gen[s1]/ np.nansum(mat_min_gen[s1]),\n mat_min_gen[s2]/ np.nansum(mat_min_gen[s2]))\n mat_min_fam_jsd.loc[s1,s2] = jsd(mat_min_fam[s1]/ np.nansum(mat_min_fam[s1]),\n mat_min_fam[s2]/ np.nansum(mat_min_fam[s2]))\n\n\n# In[265]:\n\n\ncmap = sns.cubehelix_palette(as_cmap=True,rot=-0) # (start=2.8, rot=.1,) # \"YlGnBu\"\nfig,ax=plt.subplots(1,2,figsize=(10,4))\nsns.heatmap(mat_min_fam_jsd, ax=ax[0], cmap=cmap)\nax[0].set_title(\"distance between standards @family (JSD)\", fontsize=12,)\nsns.heatmap(mat_min_gen_jsd, ax=ax[1], cmap=cmap)\nax[1].set_title(\"distance between standards @genus (JSD)\", fontsize=12,)\nfig.tight_layout()\n\n\n# In[266]:\n\n# reorganize data tidily for seaborn \nmat_min_fam_jsd_td = mat_min_fam_jsd.stack().reset_index()\nmat_min_fam_jsd_td.columns = ['sample1','sample2','jsd']\n\nmat_min_gen_jsd_td = mat_min_gen_jsd.stack().reset_index()\nmat_min_gen_jsd_td.columns = ['sample1','sample2','jsd']\n\ntemp = pd.merge(mat_min_gen_jsd_td, samp2plate, left_on='sample1', right_index=True)\ntemp.rename(columns={'plate':'sample1_plate'}, inplace=True)\n\njsd_td = pd.merge(temp, samp2plate, left_on='sample2', right_index=True)\njsd_td.rename(columns={'plate':'sample2_plate'}, inplace=True);\n\n\n# In[289]:\n\n''' examine distance at genus level 
'''\nfig,ax = plt.subplots(1,2, figsize=(8,4),sharey=True)\ncolors=sns.color_palette(\"bright\", 5)\n\n\njsd_td['within_or_between'] =jsd_td.apply(lambda x:\n (x['sample1_plate'],'between')[int(x['sample1_plate']!=x['sample2_plate'])],\n axis=1)\nttest_Avsbetween = stats.ttest_ind(\n jsd_td.loc[jsd_td['within_or_between']=='between','jsd'].values,\n jsd_td.loc[jsd_td['within_or_between']=='A','jsd'].values,\n equal_var=False)\n\nord1=sorted(set(jsd_td['within_or_between']))\nsns.stripplot(data=jsd_td, y='jsd', ax=ax[0],\n x='within_or_between', \n order=ord1,\n jitter=True, split=True,\n linewidth=1, edgecolor='gray',\n palette=dict(zip(ord1,colors)) )\n\nsns.boxplot(data=jsd_td, y='jsd', ax=ax[0],\n x='within_or_between', order=ord1, \n fliersize=0,\n palette=dict(zip(ord1,colors)) )\n\nax[0].set_xlabel('plate')\n\nord2=('within','between') \njsd_td['within_or_between'] = jsd_td.apply(lambda x:\n ord2[int(x['sample1_plate']!=x['sample2_plate'])], axis=1)\nsns.stripplot(data=jsd_td, y='jsd', ax=ax[1],\n x='within_or_between', \n order=ord2 ,\n jitter=True, split=True,linewidth=1, edgecolor='gray',\n palette=dict(zip(ord2,['lightgrey',colors[4]]))\n )\n\nsns.boxplot(data=jsd_td, y='jsd', ax=ax[1],\n x='within_or_between', order=ord2 , \n fliersize=0,\n palette=dict(zip(ord2,['lightgrey',colors[4]])))\nax[1].set_xlabel('within_or_between'+\"_plates\") \nfig.tight_layout()\nfig.text(0.5, 1, \"poo stew replicates: community distances within-plate and between-plate \", \n ha='center', fontsize=15);\n\n\n# In[285]:\n\n''' technically they are different '''\nprint('within-plate all vs between-plate all:\\n{}'.format(\n stats.ks_2samp( jsd_td.loc[jsd_td['within_or_between']=='between','jsd'].values,\n jsd_td.loc[jsd_td['within_or_between']!='between','jsd'].values ) ))\n\nprint('within-plate all vs between-plate all:\\n{}'.format(\nstats.ttest_ind(jsd_td.loc[jsd_td['within_or_between']=='between','jsd'].values,\n jsd_td.loc[jsd_td['within_or_between']!='between','jsd'].values,\n equal_var=False)))\n\nprint('within-plate A vs between-plate all:\\n{}'.format(ttest_Avsbetween))\n\n\n# ### Take home: \n# 1. the between plate variance is slightly but significantly greater than aggregated within-plate; though this is not true for all plates individually\n# 2. the poo stew replicates were placed next to each other on the plates; i don't think its good practice (though not responsible for this result, if anything it would bias us the opposite direction ) \n\n# In[ ]:\n\n\n\n","sub_path":"notebooks/OBSTDs/Umich206_OBstds.py","file_name":"Umich206_OBstds.py","file_ext":"py","file_size_in_byte":8681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
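+For reference, the jsd() defined in the notebook record above is the Jensen-Shannon divergence in log base 2, so it is symmetric and bounded by [0, 1] for normalized inputs. A standalone sanity check of the same formula (np.errstate stands in for the notebook's warning filter):
+
+import numpy as np
+
+def jsd(x, y):  # same formula as in the record above
+    x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
+    with np.errstate(divide='ignore', invalid='ignore'):
+        d1 = x * np.log2(2 * x / (x + y))
+        d2 = y * np.log2(2 * y / (x + y))
+    d1[np.isnan(d1)] = 0
+    d2[np.isnan(d2)] = 0
+    return 0.5 * np.sum(d1 + d2)
+
+assert jsd([1, 0], [0, 1]) == 1.0          # disjoint distributions -> maximal divergence
+assert jsd([0.5, 0.5], [0.5, 0.5]) == 0.0  # identical distributions -> zero
+assert jsd([0.8, 0.2], [0.3, 0.7]) == jsd([0.3, 0.7], [0.8, 0.2])  # symmetry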
+{"seq_id":"572456308","text":"import os\r\nx = [\"\"] \r\nlines=[\"\"]\r\nNames=[\"\"]\r\ndef Main():\r\n script_dir = os.path.dirname(__file__)\r\n rel_path = \"Data\\input_data2.txt\"\r\n rel_names = \"Data\\input_name.txt\"\r\n rel_nameout = \"output\\output_merged_data.txt\"\r\n abs_file_path1 = os.path.join(script_dir, rel_path)\r\n abs_file_path2 = os.path.join(script_dir, rel_names)\r\n \r\n \r\n Names = open(abs_file_path2).readlines() \r\n Names = map(lambda s: s.strip(), Names)\r\n \r\n Names = filter(None, Names)\r\n lines = open(abs_file_path1).readlines() \r\n lines = map(lambda s: s.strip(), lines)\r\n \r\n lines = filter(None, lines)\r\n for i, val in enumerate(Names):\r\n j= len(lines)\r\n if (i < j):\r\n linesstring = lines[i]\r\n x2 = linesstring\r\n k = x2.split(',')\r\n j = [k[0]] + [','+l for l in k[1:]]\r\n if(val.find(j[0])!=-1):\r\n \r\n val = val.replace(j[0], linesstring)\r\n x.append(val)\r\n else:\r\n x.append(val)\r\n x.append(linesstring)\r\n else:\r\n x.append(val) \r\n if os.path.exists(rel_nameout):\r\n open(rel_nameout, 'w').close()\r\n\r\n else:\r\n open(rel_nameout, 'w')\r\n \r\n file1 = open(rel_nameout , \"w\")\r\n for item in x:\r\n file1.write(\"%s\\n\" % item)\r\n\r\n file1.close()\r\n print (x)\r\n stayWatch = raw_input('check results above , [input] something to exit:')\r\nMain()\t\r\n\r\n \r\n","sub_path":"Option2/Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"244067478","text":"import sys\nimport math\n\n# Thor moves on a map which is 40 wide by 18 high. \n# Note that the coordinates (X and Y) start at the top left! \n# This means the most top left cell has the coordinates \"X=0,Y=0\" \n# and the most bottom right one has the coordinates \"X=39,Y=17\".\n\n# light_x: the X position of the light of power\n# light_y: the Y position of the light of power\n# initial_tx: Thor's starting X position\n# initial_ty: Thor's starting Y position\nlight_x, light_y, initial_tx, initial_ty = [int(i) for i in input().split()]\nthor_x = initial_tx\nthor_y = initial_ty\n\n# game loop\nwhile True:\n direction = \"\"\n remaining_turns = int(input()) # The remaining amount of turns Thor can move. Do not remove this line.\n \n if thor_y > light_y:\n thor_y -=1\n direction = \"N\"\n elif thor_y < light_y:\n thor_y +=1\n direction = \"S\"\n \n if thor_x < light_x:\n thor_x +=1\n direction = direction + \"E\"\n \n elif thor_x > light_x:\n thor_x -=1\n direction = direction + \"W\"\n \n \n\n\n # A single line providing the move to be made: N NE E SE S SW W or NW\n print(direction)\n","sub_path":"thor.py","file_name":"thor.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"621475521","text":"\"\"\"Pusher component for use in a Node class.\"\"\"\n\n# Import Built-Ins\nimport logging\nfrom queue import Queue\nfrom threading import Thread, Event\n\n# Import Third-Party\nimport zmq\n\n# Import home-grown\n\n\n# Init Logging Facilities\nlog = logging.getLogger(__name__)\n\n\nclass Pusher(Thread):\n \"\"\"\n Allows pushing data to pullers.\n\n The pushing is realized with ZMQ's Push sockets, and supports pushing\n to multiple pullers.\n\n The :meth:`hermes.Push.run` method continuously checks for data on the internal q,\n which is fed by the :meth:`hermes.Pusher.push` method.\n \"\"\"\n\n def __init__(self, push_addr, name, ctx=None):\n \"\"\"\n Initialize Instance.\n\n :param push_addr: Address this instance should connect to\n :param name: Name to give this :class:`hermes.Pusher` instance.\n \"\"\"\n self.push_addr = push_addr\n self._running = Event()\n self.sock = None\n self.q = Queue()\n self.ctx = ctx or zmq.Context().instance()\n super(Pusher, self).__init__(name=name)\n\n def publish(self, envelope):\n \"\"\"\n Publish the given data to all current pullers.\n\n :param envelope: :class:`hermes.Envelope` instance\n :return: None\n \"\"\"\n if self.sock:\n self.q.put(envelope)\n return True\n return False\n\n def stop(self, timeout=None):\n \"\"\"\n Stop the :class:`hermes.Pusher` instance.\n\n :param timeout: time in seconds until :exc:`TimeOutError` is raised\n :return: :class:`None`\n \"\"\"\n log.info(\"Stopping Pusher instance..\")\n self.join(timeout=timeout)\n log.info(\"..done.\")\n\n def join(self, timeout=None):\n \"\"\"\n Join the :class:`hermes.Pusher` instance and shut it down.\n\n Clears the :attr:`hermes.Pusher._running` flag to gracefully terminate the run loop.\n\n :param timeout: timeout in seconds to wait for :meth:`hermes.Pusher.join` to finish\n :return: :class:`None`\n \"\"\"\n log.debug(\"Clearing _running state..\")\n self._running.clear()\n log.debug(\"Closing socket..\")\n try:\n self.sock.close()\n except AttributeError:\n log.debug(\"Socket was already closed!\")\n pass\n super(Pusher, self).join(timeout)\n\n def run(self):\n \"\"\"\n Custumized run loop to push data.\n\n Sets up a ZMQ push socket and sends data as soon as it is available\n on the internal Queue at :attr:`hermes.Pusher.q`.\n\n :return: :class:`None`\n \"\"\"\n self._running.set()\n ctx = zmq.Context()\n self.sock = ctx.socket(zmq.PUSH)\n log.info(\"Connecting Pusher to zmq.PULL Socket at %s..\" % self.push_addr)\n self.sock.connect(self.push_addr)\n log.info(\"Success! Executing Pusher loop..\")\n while self._running.is_set():\n if not self.q.empty():\n cts_msg = self.q.get(block=False)\n frames = cts_msg.convert_to_frames()\n log.debug(\"Sending %r ..\", cts_msg)\n try:\n self.sock.send_multipart(frames)\n except zmq.error.ZMQError as e:\n log.error(\"ZMQError while sending data (%s), \"\n \"stopping Pusher\", e)\n break\n else:\n continue\n\n ctx.destroy()\n self.sock = None\n log.info(\"Loop terminated.\")\n\n","sub_path":"hermes/pusher.py","file_name":"pusher.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"599004853","text":"# MIT License\n#\n# Copyright (c) 2018 Herbert Shin\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nredis.py\n--------\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport ast\nimport contextlib\nimport hashlib\nimport logging\nimport redis\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom lib.random import validate_identifier\n\nlogger = logging.getLogger()\n\n\nREDIS_PASSWORD = \"msl1234~\"\n\n\n@contextlib.contextmanager\ndef allocate_redis_client(addr: str = \"127.0.0.1\", port: int = 6379, db: str = 0):\n \"\"\" allocate new redis client and safely close it \"\"\"\n pool = redis.ConnectionPool(host=addr, port=port, password=REDIS_PASSWORD, db=db)\n client = redis.StrictRedis(connection_pool=pool)\n try:\n yield client\n finally:\n pool.disconnect()\n\n\ndef parse_redis_result(result, default=None):\n \"\"\" convert redis results into Python objects\n @result -- results from Redis.\n \"\"\"\n if result is None:\n return default\n elif isinstance(result, bytes):\n # first decode `bytes` into string and safely load as AST object.\n try:\n parsed_result = ast.literal_eval(result.decode())\n except UnicodeDecodeError:\n parsed_result = pickle.loads(result)\n return parsed_result # otherwise, return Python object.\n\n\n# common table names\nTASK_COUNT = \"tasks-count\" # pending tasks count\nTASK_QUEUE = \"tasks-queue\" # active and pending task queue\n\n\ndef get_task_count(*a, **kw) -> int:\n \"\"\" return total tasks count \"\"\"\n with allocate_redis_client() as client:\n return parse_redis_result(client.get(TASK_COUNT), default=0)\n\n\ndef get_task_queue(*a, **kw) -> list:\n \"\"\" return total tasks \"\"\"\n with allocate_redis_client() as client:\n return parse_redis_result(client.get(TASK_QUEUE), default=[])\n\n\ndef get_key_from_redis(key: str):\n \"\"\" get identifier from database\n @key -- redis store key\n \"\"\"\n if not key:\n return\n\n with allocate_redis_client() as client:\n return client.get(key)\n\n\ndef add_key_to_redis(key: str, value: dict, noqueue=False, timeout=60 * 60 * 24) -> bool:\n \"\"\" add identifier to database\n @key -- redis store key\n @value -- redis store value\n @timeout -- redis key expiration\n \"\"\"\n if not key or key in [TASK_COUNT, TASK_QUEUE]:\n return\n\n with allocate_redis_client() as client:\n queue = get_task_queue()\n if key in queue:\n return\n\n # update task queue\n queue.append(key)\n if not noqueue:\n client.set(TASK_QUEUE, queue)\n 
client.incr(TASK_COUNT) # update pending count\n client.setex(key, timeout, value)\n\n # update pending count\n logger.debug(\"successfully added redis key: `%s`\", key)\n return get_task_count()\n logger.info(\"failed to add redis key: `%s`\", key)\n\n\ndef delete_key_from_redis(key: str) -> bool:\n \"\"\" remove key from database\n @key -- `reference id`\n \"\"\"\n if key in [TASK_COUNT, TASK_QUEUE]:\n return\n\n with allocate_redis_client() as client:\n if not client.get(key):\n return\n\n # delete task and update task queue\n queue = get_task_queue()\n if key in queue:\n queue.pop(queue.index(key))\n client.set(TASK_QUEUE, queue)\n client.decr(TASK_COUNT) # update pending count\n client.delete(key)\n\n logger.warning(\"successfully removed redis key: `%s`\", key)\n return get_task_count()\n logger.error(\"failed to remove redis key: `%s`\", key)\n\n\n__all__ = [\n \"TASK_COUNT\",\n \"TASK_QUEUE\",\n \"add_key_to_redis\",\n \"allocate_redis_client\",\n \"delete_key_from_redis\",\n \"get_key_from_redis\",\n \"get_task_count\",\n \"get_task_queue\",\n \"parse_redis_result\",\n]\n","sub_path":"udta/udta-server/src/lib/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
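+A short usage sketch for the helpers above; it assumes a Redis server reachable on localhost with the module's hard-coded password, and the lib.redis import path implied by the record's sub_path:
+
+from lib.redis import (add_key_to_redis, get_key_from_redis,
+                       delete_key_from_redis, parse_redis_result)
+
+pending = add_key_to_redis("job-123", {"status": "queued"})
+print("pending tasks:", pending)
+
+raw = get_key_from_redis("job-123")  # raw bytes straight from Redis
+print(parse_redis_result(raw))       # -> {'status': 'queued'}
+
+delete_key_from_redis("job-123")     # pops the key and decrements the count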
+{"seq_id":"314934644","text":"import numpy\n\nfrom frames.objects import FeatureFrame\n\nCOLOR_NAMES = {\n 'aliceblue': '#F0F8FF',\n 'antiquewhite': '#FAEBD7',\n 'aqua': '#00FFFF',\n 'aquamarine': '#7FFFD4',\n 'azure': '#F0FFFF',\n 'beige': '#F5F5DC',\n 'bisque': '#FFE4C4',\n 'black': '#000000',\n 'blanchedalmond': '#FFEBCD',\n 'blue': '#0000FF',\n 'blueviolet': '#8A2BE2',\n 'brown': '#A52A2A',\n 'burlywood': '#DEB887',\n 'cadetblue': '#5F9EA0',\n 'chartreuse': '#7FFF00',\n 'chocolate': '#D2691E',\n 'coral': '#FF7F50',\n 'cornflowerblue': '#6495ED',\n 'cornsilk': '#FFF8DC',\n 'crimson': '#DC143C',\n 'cyan': '#00FFFF',\n 'darkblue': '#00008B',\n 'darkcyan': '#008B8B',\n 'darkgoldenrod': '#B8860B',\n 'darkgray': '#A9A9A9',\n 'darkgreen': '#006400',\n 'darkkhaki': '#BDB76B',\n 'darkmagenta': '#8B008B',\n 'darkolivegreen': '#556B2F',\n 'darkorange': '#FF8C00',\n 'darkorchid': '#9932CC',\n 'darkred': '#8B0000',\n 'darksalmon': '#E9967A',\n 'darkseagreen': '#8FBC8F',\n 'darkslateblue': '#483D8B',\n 'darkslategray': '#2F4F4F',\n 'darkturquoise': '#00CED1',\n 'darkviolet': '#9400D3',\n 'deeppink': '#FF1493',\n 'deepskyblue': '#00BFFF',\n 'dimgray': '#696969',\n 'dodgerblue': '#1E90FF',\n 'firebrick': '#B22222',\n 'floralwhite': '#FFFAF0',\n 'forestgreen': '#228B22',\n 'fuchsia': '#FF00FF',\n 'gainsboro': '#DCDCDC',\n 'ghostwhite': '#F8F8FF',\n 'gold': '#FFD700',\n 'goldenrod': '#DAA520',\n 'gray': '#808080',\n 'green': '#008000',\n 'greenyellow': '#ADFF2F',\n 'honeydew': '#F0FFF0',\n 'hotpink': '#FF69B4',\n 'indianred': '#CD5C5C',\n 'indigo': '#4B0082',\n 'ivory': '#FFFFF0',\n 'khaki': '#F0E68C',\n 'lavender': '#E6E6FA',\n 'lavenderblush': '#FFF0F5',\n 'lawngreen': '#7CFC00',\n 'lemonchiffon': '#FFFACD',\n 'lightblue': '#ADD8E6',\n 'lightcoral': '#F08080',\n 'lightcyan': '#E0FFFF',\n 'lightgoldenrodyellow': '#FAFAD2',\n 'lightgreen': '#90EE90',\n 'lightgray': '#D3D3D3',\n 'lightpink': '#FFB6C1',\n 'lightsalmon': '#FFA07A',\n 'lightseagreen': '#20B2AA',\n 'lightskyblue': '#87CEFA',\n 'lightslategray': '#778899',\n 'lightsteelblue': '#B0C4DE',\n 'lightyellow': '#FFFFE0',\n 'lime': '#00FF00',\n 'limegreen': '#32CD32',\n 'linen': '#FAF0E6',\n 'magenta': '#FF00FF',\n 'maroon': '#800000',\n 'mediumaquamarine': '#66CDAA',\n 'mediumblue': '#0000CD',\n 'mediumorchid': '#BA55D3',\n 'mediumpurple': '#9370DB',\n 'mediumseagreen': '#3CB371',\n 'mediumslateblue': '#7B68EE',\n 'mediumspringgreen': '#00FA9A',\n 'mediumturquoise': '#48D1CC',\n 'mediumvioletred': '#C71585',\n 'midnightblue': '#191970',\n 'mintcream': '#F5FFFA',\n 'mistyrose': '#FFE4E1',\n 'moccasin': '#FFE4B5',\n 'navajowhite': '#FFDEAD',\n 'navy': '#000080',\n 'oldlace': '#FDF5E6',\n 'olive': '#808000',\n 'olivedrab': '#6B8E23',\n 'orange': '#FFA500',\n 'orangered': '#FF4500',\n 'orchid': '#DA70D6',\n 'palegoldenrod': '#EEE8AA',\n 'palegreen': '#98FB98',\n 'paleturquoise': '#AFEEEE',\n 'palevioletred': '#DB7093',\n 'papayawhip': '#FFEFD5',\n 'peachpuff': '#FFDAB9',\n 'peru': '#CD853F',\n 'pink': '#FFC0CB',\n 'plum': '#DDA0DD',\n 'powderblue': '#B0E0E6',\n 'purple': '#800080',\n 'red': '#FF0000',\n 'rosybrown': '#BC8F8F',\n 'royalblue': '#4169E1',\n 'saddlebrown': '#8B4513',\n 'salmon': '#FA8072',\n 'sandybrown': '#FAA460',\n 'seagreen': '#2E8B57',\n 'seashell': '#FFF5EE',\n 'sienna': '#A0522D',\n 'silver': '#C0C0C0',\n 'skyblue': '#87CEEB',\n 'slateblue': '#6A5ACD',\n 'slategray': '#708090',\n 'snow': '#FFFAFA',\n 'springgreen': '#00FF7F',\n 'steelblue': '#4682B4',\n 'tan': '#D2B48C',\n 'teal': '#008080',\n 'thistle': '#D8BFD8',\n 'tomato': 
'#FF6347',\n 'turquoise': '#40E0D0',\n 'violet': '#EE82EE',\n 'wheat': '#F5DEB3',\n 'white': '#FFFFFF',\n 'whitesmoke': '#F5F5F5',\n 'yellow': '#FFFF00',\n 'yellowgreen': '#9ACD32'\n}\n\n\ndef preprocess_input(s):\n try:\n s = int(s)\n return s\n except ValueError:\n pass\n try:\n s = float(s)\n return s\n except ValueError:\n pass\n if s == 'true':\n return True\n elif s == 'false':\n return False\n try:\n s = str(s)\n return s\n except ValueError:\n pass\n return s\n\n\ndef process_form_input(form_type, form_dicts, form_input):\n for element_dict in form_dicts:\n element_id = element_dict['type']+'_'+form_type+'_'+element_dict['name']\n if (element_id + '_value') in form_input:\n element_dict['value'] = preprocess_input(form_input[element_id + '_value'])\n if (element_id + '_text') in form_input:\n element_dict['text'] = preprocess_input(form_input[element_id + '_text'])\n return form_dicts\n\n\ndef initialize_form_dicts(form_dicts):\n try:\n for element_dict in form_dicts:\n element_dict['value'] = element_dict['default']\n return form_dicts\n except KeyError:\n raise ValueError(\"Invalid form dictionary\")\n\n\ndef gen_empty_data_frame_from_data_setting(data_setting):\n data_source_template = dict()\n for element_dict in data_setting:\n data_source_template[element_dict['name']] = []\n return FeatureFrame(frame=data_source_template)\n\n\ndef form_dicts_to_dict(form_dicts):\n form_dict = dict()\n for element_dict in form_dicts:\n form_dict[element_dict['name']] = {k: element_dict[k]\n for k in element_dict\n if k != 'name'}\n return form_dict\n\n\ndef color_name_to_code(color_name):\n if color_name not in COLOR_NAMES:\n return None\n else:\n return COLOR_NAMES[color_name]\n\n\ndef digitize(l, n_bin=None, space=None):\n if (not isinstance(l, list)\n or not all(isinstance(i, int)\n or isinstance(i, float)\n or isinstance(i, long)\n for i in l)):\n raise ValueError(\"Digitize: Invalid input list\")\n if not space:\n if not isinstance(n_bin, int):\n raise ValueError(\"Digitize: Invalid number of bins\")\n hist, bin_edges = numpy.histogram(l, n_bin-1)\n return numpy.digitize(l, bin_edges)\n elif not n_bin:\n if not isinstance(l, list) or not all(isinstance(i, int) for i in space):\n raise ValueError(\"Digitize: invalid range\")\n try:\n map = {v: i for i, v in enumerate(space)}\n return [map[i] for i in l]\n except KeyError:\n raise ValueError(\"Digitize: find value not in the given value space\")\n else:\n raise ValueError(\"Digitize: must indicate number of bins or range\")","sub_path":"rendering/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"24724475","text":"import turicreate as tc\n\n# loading data set\nsf = tc.SFrame('Reviews.csv')\n\n# cleaning data\nsf = sf.remove_columns(['HelpfulnessNumerator', 'HelpfulnessDenominator', 'Time', 'Id', 'ProductId', 'UserId', 'ProfileName', 'Summary'])\nsf['word_count'] = tc.text_analytics.count_words(sf['Text'])\nsf = sf[sf['Score'] != 3]\nsf['sentiment'] = sf['Score'] >= 4\n\n# creating model\ntrain_data, test_data = sf.random_split(0.8, seed=0)\nsentiment_model = tc.logistic_classifier.create(train_data,\n target='sentiment',\n features=['word_count'],\n validation_set=test_data)\n\n# saving model\nsentiment_model.save('my_model')\n","sub_path":"sentiment_model.py","file_name":"sentiment_model.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"277063884","text":"#!/usr/bin/env python\n\nfrom PySide import QtCore, QtGui\n\nfrom tfl_api_query import *\nfrom query_urls import *\nfrom result_filters import *\nimport pickle\n\ndef makeTreeView():\n view = QtGui.QTreeView()\n view.setModel(model)\n view.setWindowTitle(\"Simple Tree Model\")\n view.expandAll()\n return view\n\n\ndef ListDictIter(ld):\n if isinstance(ld, list):\n for x in ld:\n ListDictIter(x)\n elif isinstance(ld, dict):\n pass\n\n\ndef MakeVBoxLayout(views):\n mainLayout = QtGui.QVBoxLayout()\n for v in views:\n mainLayout.addWidget(v)\n return mainLayout\n\n\ndef GrabDictAsHtml():\n #s = grab( AddAppId(JourneyResults()), cache=True )\n s = grab( AddAppId(JourneyResultsEx()), cache=True )\n #s = grab( AddAppId(Timetable()), cache=True )\n print(\"len(s) = {}\", len(s))\n s = json.loads(s)\n s = GetJourneyResult(s, 0)\n #for x in s:\n # if \"vehicleId\" in x and x['vehicleId']==\"226\":\n # d = x\n # s = { \"currentLocation\":d[\"currentLocation\"], \\\n # \"vehicleId\":d[\"vehicleId\"], \\\n # \"timeToStation\":d[\"timeToStation\"],\n # \"naptanId\":d[\"naptanId\"]\n # }\n # s = d\n # break\n s = json.dumps(s, indent=4)\n s = s.replace(\"\\n\",\"
\")\n s = s.replace(\" \", \" \")\n return s\n\n\nif __name__ == '__main__':\n\n import sys\n import json\n\n app = QtGui.QApplication(sys.argv)\n\n s = GrabDictAsHtml()\n lineEdit = QtGui.QLineEdit()\n pte = QtGui.QPlainTextEdit()\n btn = QtGui.QPushButton(\"Refresh\")\n\n def updateText(pte):\n #print(pte)\n pte.clear()\n pte.appendHtml(str(GrabDictAsHtml()))\n\n btn.clicked.connect( lambda : updateText(pte) ) \n updateText(pte)\n layout = MakeVBoxLayout([ lineEdit, pte, btn ])\n view = QtGui.QWidget()\n view.setLayout(layout)\n view.resize(600, 600)\n view.show()\n sys.exit(app.exec_())\n\n","sub_path":"python/tfl_api_debug/query_gui.py","file_name":"query_gui.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"377723259","text":"#!/usr/bin/env python\nimport os\nfrom glob import glob\n\nimport numpy as np\n\nimport ROOT\nfrom ROOT import TCanvas, TPad, TFile, TPaveText, TLegend\nfrom ROOT import gBenchmark, gStyle, gROOT, TStyle\nfrom ROOT import TH1D, TF1, TGraphErrors, TMultiGraph\n\nfrom math import sqrt\n\nfrom array import array\n\nimport tdrstyle\ntdrstyle.setTDRStyle()\n\nimport CMS_lumi\n\n#change the CMS_lumi variables (see CMS_lumi.py)\nCMS_lumi.lumi_13TeV = '13 TeV'\nCMS_lumi.writeExtraText = 1\n#CMS_lumi.extraText = 'Preliminary'\nCMS_lumi.extraText = 'Simulation'\n\niPos = 0\niPeriod = 0\n\ngStyle.SetOptFit(0)\n\ndef loadcanvas(name):\n canvas = TCanvas(name,name,400,20,1400,1000)\n canvas.SetFillColor(0)\n canvas.SetBorderMode(0)\n canvas.SetFrameFillStyle(0)\n canvas.SetFrameBorderMode(0)\n canvas.SetTickx(0)\n canvas.SetTicky(0)\n return canvas\n\ndef loadlegend(top, bottom, left, right):\n relPosX = 0.001\n relPosY = 0.005\n posX = 1 - right - relPosX*(1-left-right)\n posY = 1 - top - relPosY*(1-top-bottom)\n legendOffsetX = 0.0\n legendOffsetY = - 0.05\n textSize = 0.05\n textFont = 60\n legendSizeX = 0.4\n legendSizeY = 0.2\n legend = TLegend(posX-legendSizeX+legendOffsetX,posY-legendSizeY+legendOffsetY,posX+legendOffsetX,posY+legendOffsetY)\n legend.SetTextSize(textSize)\n legend.SetLineStyle(0)\n legend.SetBorderSize(0)\n return legend\n\nhistos={}\neosDir='root://cmsxrootd.fnal.gov//store/user/ddicroce/test'\n#eosDir='/eos/uscms/store/user/ddicroce/test'\n\nfiles_ = []\nfirstfile = True\nfilelist = '/uscms/home/ddicroce/nobackup/TauClassifier/CMSSW_10_2_20_UL/src/MLAnalyzer/list_HTauTau_biased.txt'\n#filelist= 'testlist_sim_Jul14.txt'\nwith open(filelist) as list_:\n content = list_.readlines()\npaths = [x.strip() for x in content] \nprint(paths)\n\nfor path in paths:\n #print(path)\n files_.append( TFile.Open(path) )\n #print(file)\n tmp_2d = files_[-1].Get('fevt/h_a_m_pT')\n tmp_m = files_[-1].Get('fevt/h_jet_ma')\n tmp_pt = files_[-1].Get('fevt/h_jet_pta')\n if (firstfile):\n histos['mVSpT'] = tmp_2d.Clone('mVSpT')\n histos['mass'] = tmp_m.Clone('m')\n histos['pt'] = tmp_pt.Clone('pt')\n firstfile = False\n if not (firstfile):\n histos['mVSpT'].Add(tmp_2d)\n histos['mass'].Add(tmp_m)\n histos['pt'].Add(tmp_pt)\n\nprint (histos['mVSpT'].GetNbinsX())\nprint (histos['mVSpT'].GetNbinsY())\nbinx = []\nbiny = []\nbinz = []\n\nhistos['mVSpT_inverted'] = histos['mVSpT'].Clone('mVSpT_inverted')\nbinint = histos['mVSpT'].Integral()\nbinmax = 0\nfor iBinX in range(histos['mVSpT'].GetNbinsX()):\n #binx.append(histos['mVSpT'].GetXaxis().GetBinUpEdge(iBinX+1))\n for iBinY in range(histos['mVSpT'].GetNbinsY()):\n binz.append(histos['mVSpT'].GetBinContent(iBinX+1,iBinY+1))\n if (histos['mVSpT'].GetBinContent(iBinX+1,iBinY+1) > binmax):\n binmax = histos['mVSpT'].GetBinContent(iBinX+1,iBinY+1)\nprint(binmax)\n\nhistos['mVSpT_ratio'] = histos['mVSpT'].Clone('mVSpT_ratio')\nfor iBinX in range(histos['mVSpT_ratio'].GetNbinsX()):\n for iBinY in range(histos['mVSpT_ratio'].GetNbinsY()):\n if (histos['mVSpT'].GetBinContent(iBinX+1,iBinY+1) == 0): continue #Avoids division by 0, in the case that the bin content was 0\n histos['mVSpT_ratio'].SetBinContent(iBinX+1, iBinY+1, ((1/binmax)*(histos['mVSpT'].GetBinContent(iBinX+1,iBinY+1))) )\n histos['mVSpT_inverted'].SetBinContent(iBinX+1, iBinY+1, (1/binmax)*(binint/histos['mVSpT'].GetBinContent(iBinX+1,iBinY+1)))\n\nhistos['mass_inverted'] = histos['mass'].Clone('mass_inverted')\nmassint = histos['mass'].Integral()\nmassmax = 0\nfor 
iBinX in range(histos['mass'].GetNbinsX()):\n binx.append(histos['mass'].GetXaxis().GetBinUpEdge(iBinX+1))\n if (massmax < massint/histos['mass'].GetBinContent(iBinX+1)): \n massmax = massint/histos['mass'].GetBinContent(iBinX+1)\nfor iBinX in range(histos['mass_inverted'].GetNbinsX()):\n if (histos['mass'].GetBinContent(iBinX+1) == 0): continue #Avoids division by 0, in the case that the bin content was 0\n histos['mass_inverted'].SetBinContent(iBinX+1, (1/massmax)*(massint/histos['mass'].GetBinContent(iBinX+1)))\n\nhistos['pt_inverted'] = histos['pt'].Clone('pt_inverted')\nptint = histos['pt'].Integral()\nptmax = 0\nfor iBinX in range(histos['pt'].GetNbinsX()):\n biny.append(histos['pt'].GetXaxis().GetBinUpEdge(iBinX+1))\n if (ptmax < ptint/histos['pt'].GetBinContent(iBinX+1)):\n ptmax = ptint/histos['pt'].GetBinContent(iBinX+1)\nfor iBinX in range(histos['pt_inverted'].GetNbinsX()):\n if (histos['pt'].GetBinContent(iBinX+1) == 0): continue #Avoids division by 0, in the case that the bin content was 0\n histos['pt_inverted'].SetBinContent(iBinX+1, (1/ptmax)*(ptint/histos['pt'].GetBinContent(iBinX+1)))\n \nprint(binx)\nprint(biny)\nprint(binz)\n\ncanvas = loadcanvas(\"c1\")\ncanvas.cd()\nhistos['mVSpT'].GetXaxis().SetTitle(\"m^{a} (GeV)\")\nhistos['mVSpT'].GetYaxis().SetTitle(\"p_{T}^{a} (GeV)\")\nhistos['mVSpT'].SetMinimum(0)\nhistos['mVSpT'].Draw('COLZ TEXT')\nCMS_lumi.CMS_lumi(canvas, iPeriod, iPos)\ncanvas.Update()\ncanvas.SaveAs('a_massVspT_biased.root')\ncanvas.SaveAs('a_massVspT_biased.png')\n\ncanvas = loadcanvas(\"c2\")\ncanvas.cd()\nlegend = loadlegend(canvas.GetTopMargin(), canvas.GetBottomMargin(), canvas.GetLeftMargin(), canvas.GetRightMargin())\nhistos['mass'].SetLineColor(2)\nhistos['mass'].SetLineWidth(3)\nhistos['mass'].SetXTitle(\"m^{a} (GeV)\")\nhistos['mass'].SetYTitle(\"Jets\")\nhistos['mass'].SetMinimum(0)\nhistos['mass'].Draw('COLZ TEXT')\nlegend.AddEntry(histos['mass'], 'Biased','lf')\nCMS_lumi.CMS_lumi(canvas, iPeriod, iPos)\ncanvas.Update()\nlegend.Draw()\ncanvas.SaveAs('a_m_biased.root')\ncanvas.SaveAs('a_m_biased.png')\n\ncanvas = loadcanvas(\"c3\")\ncanvas.cd()\nlegend = loadlegend(canvas.GetTopMargin(), canvas.GetBottomMargin(), canvas.GetLeftMargin(), canvas.GetRightMargin())\nhistos['pt'].SetLineColor(2)\nhistos['pt'].SetLineWidth(3)\nhistos['pt'].SetXTitle(\"p_{T}^{a} (GeV)\")\nhistos['pt'].SetYTitle(\"Jets\")\nhistos['pt'].SetMinimum(0)\nhistos['pt'].Draw('COLZ TEXT')\nlegend.AddEntry(histos['pt'], 'Biased','lf')\nCMS_lumi.CMS_lumi(canvas, iPeriod, iPos)\ncanvas.Update()\nlegend.Draw()\ncanvas.SaveAs('a_pt_biased.root')\ncanvas.SaveAs('a_pt_biased.png')\n\ncanvas = loadcanvas(\"c4\")\ncanvas.cd()\nhistos['mVSpT_ratio'].GetXaxis().SetTitle(\"m^{a} (GeV)\")\nhistos['mVSpT_ratio'].GetYaxis().SetTitle(\"p_{T}^{a} (GeV)\")\nhistos['mVSpT_ratio'].SetMinimum(0)\nhistos['mVSpT_ratio'].Draw('COLZ TEXT')\nCMS_lumi.CMS_lumi(canvas, iPeriod, iPos)\ncanvas.Update()\ncanvas.SaveAs('a_massVspT_ratio_biased.root')\ncanvas.SaveAs('a_massVspT_ratio_biased.png')\n\ncanvas = loadcanvas(\"c5\")\ncanvas.cd()\nlegend = loadlegend(canvas.GetTopMargin(), canvas.GetBottomMargin(), canvas.GetLeftMargin(), canvas.GetRightMargin())\nhistos['mass_inverted'].SetLineColor(2)\nhistos['mass_inverted'].SetLineWidth(3)\nhistos['mass_inverted'].SetXTitle(\"m^{a} (GeV)\")\nhistos['mass_inverted'].SetYTitle(\"Jets\")\nhistos['mass_inverted'].Draw('COLZ TEXT')\nlegend.AddEntry(histos['mass_inverted'], 'Biased','lf')\nCMS_lumi.CMS_lumi(canvas, iPeriod, 
iPos)\ncanvas.Update()\nlegend.Draw()\ncanvas.SaveAs('a_m_inverted_biased.root')\ncanvas.SaveAs('a_m_inverted_biased.png')\n\ncanvas = loadcanvas(\"c6\")\ncanvas.cd()\nlegend = loadlegend(canvas.GetTopMargin(), canvas.GetBottomMargin(), canvas.GetLeftMargin(), canvas.GetRightMargin())\nhistos['pt_inverted'].SetLineColor(2)\nhistos['pt_inverted'].SetLineWidth(3)\nhistos['pt_inverted'].SetXTitle(\"p_{T}^{a} (GeV)\")\nhistos['pt_inverted'].SetYTitle(\"Jets\")\nhistos['pt_inverted'].Draw('COLZ TEXT')\nlegend.AddEntry(histos['pt_inverted'], 'Biased','lf')\nCMS_lumi.CMS_lumi(canvas, iPeriod, iPos)\ncanvas.Update()\nlegend.Draw()\ncanvas.SaveAs('a_pt_inverted_biased.root')\ncanvas.SaveAs('a_pt_inverted_biased.png')\n\ncanvas = loadcanvas(\"c7\")\ncanvas.cd()\nhistos['mVSpT_inverted'].GetXaxis().SetTitle(\"m^{a} (GeV)\")\nhistos['mVSpT_inverted'].GetYaxis().SetTitle(\"p_{T}^{a} (GeV)\")\nhistos['mVSpT_inverted'].SetMinimum(0)\nhistos['mVSpT_inverted'].Draw('COLZ TEXT')\nCMS_lumi.CMS_lumi(canvas, iPeriod, iPos)\ncanvas.Update()\ncanvas.SaveAs('a_massVspT_inverted_biased.root')\ncanvas.SaveAs('a_massVspT_inverted_biased.png')\n","sub_path":"plot_mVSpT.py","file_name":"plot_mVSpT.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"421280790","text":"from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.views import (\n LoginView, LogoutView\n)\nfrom django.contrib.auth import authenticate, login,logout\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.signing import BadSignature, SignatureExpired, loads, dumps\nfrom django.http import Http404, HttpResponseBadRequest\nfrom django.shortcuts import redirect,render,get_object_or_404 # 追加\nfrom django.template.loader import render_to_string\nfrom django.views import generic\nfrom .forms import (\n LoginForm, UserCreateForm, StudentCreateForm, CompanyCreateForm, PostAddForm # PostAddFormの追加\n)\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView\n\nfrom .models import User, Student, Company, BoardModel\nfrom .decorators import student_required, society_required, company_required\n\nfrom django.contrib.auth.forms import UserCreationForm\n\n\n# ログイン前のページ表示\ndef selectfunc(request):\n return render(request,'select.html')\n\n\n# signup時、studentかsocietyか選択\nclass SignUpView(TemplateView):\n template_name = 'signup.html'\n\n\n# login\ndef loginfunc(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user_login = User.objects.get(email=username)\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user_login)\n if user_login.is_student:\n return redirect('app:student_home') # student_homeにリダイレクトする\n if user_login.is_society:\n return redirect('app:detailfun')\n if user_login.is_company:\n return redirect('app:company_home')\n else:\n return render(request, 'login.html', {'error':'メールアドレスかパスワードが間違っています'})\n else:\n return render(request, 'login.html')\n\n\n# StudentUserのhome画面\n@login_required\n@student_required\ndef student_home(request):\n object_list = BoardModel.objects.all().order_by('-readtext') # サークルの投稿フォームと同様にreadtextまで取ってくる\n return render(request, 'student_home.html', {'object_list':object_list})\n\n\n# SocietyUserのhome画面\n@login_required\n@society_required\ndef society_home(request):\n return render(request,'society_home.html')\n\n\n# CompanyUserのhome画面\n@login_required\n@company_required\ndef company_home(request):\n return render(request,'company_home.html')\n\n\nclass Logout(LogoutView):\n \"\"\"ログアウトページ\"\"\"\n template_name = 'select.html'\n\n\n# SocietyUserのsignup\nclass UserCreate(generic.CreateView):\n \"\"\"ユーザー仮登録\"\"\"\n template_name = 'user_create.html'\n form_class = UserCreateForm\n\n def form_valid(self, form):\n \"\"\"仮登録と本登録用メールの発行.\"\"\"\n # 仮登録と本登録の切り替えは、is_active属性を使うと簡単です。\n # 退会処理も、is_activeをFalseにするだけにしておくと捗ります。\n \n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # アクティベーションURLの送付\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': self.request.scheme,\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('app/mail_template/create/subject.txt', context)\n message = render_to_string('app/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('app:user_create_done')\n\n\n#StudentUserのsignup\nclass StudentCreate(generic.CreateView):\n model = User\n form_class = StudentCreateForm\n template_name = 'user_create.html'\n\n def get_context_data(self, 
**kwargs):\n kwargs['user_type'] = 'student'\n return super().get_context_data(**kwargs)\n\n def form_valid(self, form):\n user = form.save()\n #login(self.request, user)\n #return redirect('app:list')。\n \n #user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # アクティベーションURLの送付\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': self.request.scheme,\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('app/mail_template/create/subject.txt', context)\n message = render_to_string('app/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('app:user_create_done')\n\n\n#companyUserのsignup\nclass CompanyCreate(generic.CreateView):\n model = User\n form_class = CompanyCreateForm\n template_name = 'user_create.html'\n\n def get_context_data(self, **kwargs):\n kwargs['user_type'] = 'company'\n return super().get_context_data(**kwargs)\n\n def form_valid(self, form):\n user = form.save()\n #login(self.request, user)\n #return redirect('app:list')。\n \n #user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # アクティベーションURLの送付\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': self.request.scheme,\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('app/mail_template/create/subject.txt', context)\n message = render_to_string('app/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('app:user_create_done')\n\n\n\n# User(Society/Student)の仮登録\nclass UserCreateDone(generic.TemplateView):\n template_name = 'user_create_done.html'\n\n\n# User(Society/Student)の本登録処理\nclass UserCreateComplete(generic.TemplateView):\n \"\"\"メール内URLアクセス後のユーザー本登録\"\"\"\n template_name = 'user_create_complete.html'\n timeout_seconds = getattr(settings, 'ACTIVATION_TIMEOUT_SECONDS', 60*60*24) # デフォルトでは1日以内\n\n def get(self, request, **kwargs):\n \"\"\"tokenが正しければ本登録.\"\"\"\n token = kwargs.get('token')\n try:\n user_pk = loads(token, max_age=self.timeout_seconds)\n\n # 期限切れ\n except SignatureExpired:\n return HttpResponseBadRequest()\n\n # tokenが間違っている\n except BadSignature:\n return HttpResponseBadRequest()\n\n # tokenは問題なし\n else:\n try:\n user = User.objects.get(pk=user_pk)\n except User.DoesNotExist:\n return HttpResponseBadRequest()\n else:\n if not user.is_active:\n # 問題なければ本登録とする\n user.is_active = True\n user.save()\n return super().get(request, **kwargs)\n\n return HttpResponseBadRequest()\n\n# nagaya_develop_branchの変更箇所\n# 各投稿の詳細ページに飛ぶ\ndef detailfunc(request):\n object = BoardModel.objects.all().order_by('-readtext') # BordModelモデルの記事(objects)を全て(all())作成された順番(order_by('-readtext'))に取得してobject変数に代入\n return render(request, 'detail.html', {'object':object})\n\n# 各BoardModelを参照するため用のdetail関数を用意\ndef everypost(request, post_id): # urls.pyから送られてくるrequestとeverypost_idを取得\n post = get_object_or_404(BoardModel, id=post_id) # idが存在しなかった場合、「404 not found」\n return render(request, 'everypost.html', {'post': post})\n\n# 投稿フォーム用のadd関数\ndef add(request):\n if request.method == \"POST\":\n form = PostAddForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.user = request.user\n post.save()\n return redirect('app:detailfun')\n else: \n form = PostAddForm()\n return render(request, 'add.html', {'form': form})\n\n# 編集フォーム用のedit関数。編集ボタンをeverypost.htmlに作成。\ndef 
edit(request, post_id):\n post = get_object_or_404(BoardModel, id=post_id)\n if request.method == \"POST\":\n form = PostAddForm(request.POST, request.FILES, instance=post)\n if form.is_valid():\n form.save()\n return redirect('app:everypost', post_id=post.id)\n else:\n form = PostAddForm(instance=post)\n return render(request, 'edit.html', {'form': form, 'post':post })\n\n# 削除フォーム用のdelete関数\n# 削除機能はHTMLファイルを作成する必要がない。everypost.htmlに削除ボタンを作成。\ndef delete(request, post_id):\n post = get_object_or_404(BoardModel, id=post_id)\n post.delete()\n return redirect('app:detailfun')\n\n# # 学生側は別のeveyypost(編集削除できない)ページを作る。そのための関数。\n# def everypostforStuednt(request, post_id):\n# post = get_object_or_404(BoardModel, id=post_id) # idが存在しなかった場合、「404 not found」\n# return render(request, 'everypostforStudent.html', {'post': post})\n#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n# いいね機能の実装\ndef goodfunc(request, pk):\n post = BoardModel.objects.get(pk=pk)\n post.good = post.good + 1\n post.save()\n return redirect('app:student_home')","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"514399097","text":"# -*- coding: utf8\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport argparse\nimport time\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom dataset import MNIST, FashionMNIST\nfrom model import Multitask\n\nparser = argparse.ArgumentParser(description='PyTorch CNN Sentence Classification')\n# training configs\nparser.add_argument('--optimizer', type=str, default='Adam',\n help='training optimizer (default: Adam)')\nparser.add_argument('--batch-size', type=int, default=100,\n help='input batch size for training (default: 100)')\nparser.add_argument('--test-batch-size', type=int, default=100,\n help='input batch size for testing (default: 100)')\nparser.add_argument('--n-class', type=int, default=10,\n help='number of class (default: 10)')\nparser.add_argument('--epochs', type=int, default=50,\n help='number of epochs to train (default: 50)')\nparser.add_argument('--lr', type=float, default=1e-3,\n help='learning rate (default: 0.001)')\nparser.add_argument('--momentum', type=float, default=0.9,\n help='SGD momentum (default: 0.9)')\nparser.add_argument('--w-decay', type=float, default=0.,\n help='L2 norm (default: 0)')\nparser.add_argument('--log-interval', type=int, default=500,\n help='how many batches to wait before logging training status')\nparser.add_argument('--pre-trained', type=int, default=1,\n help='using pre-trained model or not (default: 1)')\n# data\nparser.add_argument('--source-dataset', type=str, default='M+F',\n help='source dataset')\nparser.add_argument('--target-dataset', type=str, default='F',\n help='current dataset')\n# device\nparser.add_argument('--cuda', type=int, default=1,\n help='using CUDA training')\nparser.add_argument('--multi-gpu', action='store_true', default=False,\n help='using multi-gpu')\nargs = parser.parse_args()\nargs.cuda = args.cuda and torch.cuda.is_available()\nparams = \"{}-{}-batch{}-epoch{}-lr{}-momentum{}-wdecay{}\".format(args.source_dataset, args.optimizer, args.batch_size, args.epochs, args.lr, args.momentum, args.w_decay)\nprint('args: {}\\nparams: {}'.format(args, params))\n\n# define result file & model file\nresult_dir = 'result'\nmodel_dir = 'model'\nfor dir in [result_dir, model_dir]:\n if not os.path.exists(dir):\n os.makedirs(dir)\naccs = np.zeros(args.epochs)\n\n# load data\ntrain_data = FashionMNIST(phase='train')\ntrain_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=4)\nval_data = FashionMNIST(phase='val')\nval_loader = DataLoader(val_data, batch_size=args.test_batch_size, shuffle=False, num_workers=4)\n\n# load pre-trained model\nif args.pre_trained:\n pretrained_model = torch.load(os.path.join(model_dir, params))\n pretrained_dict = pretrained_model.state_dict()\n if args.target_dataset == 'M':\n pretrained_dict['classifier.weight'] = pretrained_dict['classifier.weight'][:10]\n pretrained_dict['classifier.bias'] = pretrained_dict['classifier.bias'][:10]\n elif args.target_dataset == 'F':\n pretrained_dict['classifier.weight'] = pretrained_dict['classifier.weight'][10:]\n pretrained_dict['classifier.bias'] = pretrained_dict['classifier.bias'][10:]\n print(\"Load pre-trained model\", params)\n\n# use pre-trained weight\nmodel = Multitask(n_class=train_data.n_class)\nmodel.load_state_dict(pretrained_dict) # load the state dict\nparams = 
\"transfer-{}to{}-{}-batch{}-epoch{}-lr{}-momentum{}-wdecay{}\".format(args.source_dataset, args.target_dataset, args.optimizer, args.batch_size, args.epochs, args.lr, args.momentum, args.w_decay)\n\n# use GPU\nif args.cuda:\n ts = time.time()\n model = model.cuda()\n if args.multi_gpu:\n num_gpu = list(range(torch.cuda.device_count()))\n model = nn.DataParallel(model, device_ids=num_gpu)\n print(\"Finish cuda loading, time elapsed {}\".format(time.time() - ts))\n\n# define loss & optimizer\nif args.optimizer == 'Adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.w_decay)\nelif args.optimizer == 'SGD':\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)\nelif args.optimizer == 'RMSprop':\n optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)\nscheduler = MultiStepLR(optimizer, milestones=[25], gamma=0.01)\ncriterion = nn.BCEWithLogitsLoss()\n\n\ndef train(epoch):\n model.train()\n for idx, batch in enumerate(train_loader):\n optimizer.zero_grad()\n if args.cuda:\n batch['X'] = batch['X'].cuda()\n batch['Y'] = batch['Y'].cuda()\n inputs, target = Variable(batch['X']), Variable(batch['Y'])\n output = model(inputs)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if idx % args.log_interval == 0:\n print(\"Training epoch {}, idx {}, loss {}\".format(epoch, idx, loss.data[0]))\n\n\ndef val(epoch):\n model.eval()\n val_loss = 0.\n correct = 0\n for idx, batch in enumerate(val_loader):\n if args.cuda:\n batch['X'] = batch['X'].cuda()\n batch['Y'] = batch['Y'].cuda()\n inputs, target = Variable(batch['X']), Variable(batch['Y'])\n output = model(inputs)\n val_loss += criterion(output, target).data[0]\n pred = np.argmax(output.data.cpu().numpy(), axis=1)\n target = np.argmax(target.data.cpu().numpy(), axis=1)\n correct += (pred == target).sum()\n\n val_loss /= idx\n acc = correct / len(val_data)\n accs[epoch] = acc\n np.save(os.path.join(result_dir, params), accs)\n if acc >= np.max(accs):\n model_name = os.path.join(model_dir, params)\n torch.save(model, model_name)\n print(\"Validating epoch {}, val_loss {}, acc {:.4f}({}/{})\".format(epoch, val_loss, acc, correct, len(val_data)))\n\n\nif __name__ == \"__main__\":\n val(0) # test initial performance before training\n\n # training with one dataset\n print(\"Strat training\")\n for epoch in range(args.epochs):\n scheduler.step()\n ts = time.time()\n train(epoch)\n val(epoch)\n print(\"Finish epoch {}, time elapsed {}\".format(epoch, time.time() - ts))\n print(\"Best val acc {}\".format(np.max(accs)))\n","sub_path":"transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"91012246","text":"#!/usr/bin/python3.4\nimport os\nimport re\nimport time\nimport argparse\nimport zipfile\nimport shutil\nfrom datetime import datetime, timedelta\nimport tempfile\nimport MySQLdb as mariadb\nimport fnmatch\nimport concurrent.futures\nimport configparser\n\ndef _read_config(config_file, envir='prod'):\n config = configparser.ConfigParser()\n config.read(config_file)\n configuraton = config._sections\n return configuraton.get(envir)\n\n\ndef roundTime(roundTo=1, dt=datetime.now()):\n \"\"\"Round a datetime object to any time laps in seconds\"\"\"\n rt = dt + timedelta(days=roundTo)\n return rt\n\n\ndef get_file(datemas, env):\n port = 22\n hostanme = env.get('hostname')\n username = env.get('username')\n password = env.get('password')\n folder = env.get('folder')\n filemask = env.get('filemask')\n\n #proc_filenames = [os.path.join(folder,f) for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)) and fnmatch.fnmatch(f, \"traces.{}.*\".format(datemas[0].strftime(\"%Y.%m\")))]\n proc_filenames = [os.path.join(folder, f) for f in os.listdir(folder) if\n os.path.isfile(os.path.join(folder, f)) and datetime.strptime(f[7:17], '%Y.%m.%d') in datemas]\n\n \"\"\"and \n sftp_templates = []\n if len(datemas)>2:\n for cdate in range(datemas):\n sftp_templates.append(\"traces.\" + cdate.strftime(\"%Y.%m.%d\") + \".*.zip\")\n else:\n for i in range((datemas[1]-datemas[0]).days):\n sftp_templates.append(\"traces.\" + (datemas[0]+timedelta(days=i)).strftime(\"%Y.%m.%d\") + \".*.zip\")\n\n \n for filename in onlyfiles:\n for sftp_temp in sftp_templates:\n if fnmatch.fnmatch(filename, sftp_temp):\n proc_filenames.append(os.path.join(folder, filename))\n \"\"\"\n run_mp(proc_filenames)\n\n\ndef timer(f):\n def inner(*args, **kwargs):\n try:\n t0 = time.time()\n return f(*args, **kwargs)\n finally:\n t1 = time.time()\n print(f.__name__, 'TOOK', t1 - t0)\n\n return inner\n\n\n# @timer\ndef f3(fullpath, dest, filemask):\n filelist = []\n\n tink_re = re.compile(filemask)\n\n with zipfile.ZipFile(fullpath) as zip_file:\n\n tink_list = filter(tink_re.match, list(zip_file.namelist()))\n\n for member in tink_list:\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n filelist.append(filename)\n # copy file (taken from zipfile's extract)\n source = zip_file.open(member)\n target = open(os.path.join(dest, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n return filelist\n\n\ndef mp_parse_fitcp(filename):\n mes_iso_regexp = re.compile(\"MES\\.iso\\.i(\\d{3}).+?\\[(.+?)\\]\", re.IGNORECASE)\n mes_date_regexp = re.compile(\"MES\\.lerd.+?\\[(.+?)\\]\", re.IGNORECASE)\n mes_rpc_regexp = re.compile(\"MES\\.srif\\.rprc.+?\\[(.+?)\\]\", re.IGNORECASE)\n mes_ioss_regexp = re.compile(\"IOS\\.ioss\\.rprc.+?\\[(.+?)\\]\", re.IGNORECASE)\n mes_vhr_regexp = re.compile(\"MES\\.vhr\\.vhrc.+?\\[(.+?)\\]\", re.IGNORECASE)\n file_message = {}\n tcpip_messages = []\n with open(filename) as f:\n for line in f:\n if \"FILOOP*READ\" in line:\n if len(file_message) > 3:\n tcpip_messages.append(file_message.copy())\n # print (file_message)\n file_message.clear()\n continue\n mes_iso_search = mes_iso_regexp.search(line)\n mes_date_search = mes_date_regexp.search(line)\n mes_rpc_search = mes_rpc_regexp.search(line)\n mes_ioss_search = mes_ioss_regexp.search(line)\n mes_vhr_search = mes_vhr_regexp.search(line)\n if mes_iso_search:\n file_message.update({mes_iso_search.group(1).strip(): mes_iso_search.group(2).strip()})\n elif 
mes_date_search:\n file_message.update({'date': mes_date_search.group(1).strip()})\n elif mes_rpc_search:\n file_message.update({'API': mes_rpc_search.group(1).strip()})\n elif mes_ioss_search:\n file_message.update({'IOSS': mes_ioss_search.group(1).strip()})\n elif mes_vhr_search:\n file_message.update({'VHR': mes_vhr_search.group(1).strip()})\n if len(file_message) > 3:\n tcpip_messages.append(file_message.copy())\n return tcpip_messages\n\n\ndef mp_parse_archive(filename):\n with tempfile.TemporaryDirectory() as tmpdir:\n zipfilelist = (f3(filename, tmpdir, \".+?(ATM|POS|TINK)-port.+?\\.dmp\"))\n\n mariadb_connection = mariadb.connect(user='root', password='', database='online', host='localhost')\n c = mariadb_connection.cursor()\n try:\n select_var = \"SELECT id from `{}` where filename = '{}'\".format(\"files\", os.path.basename(filename))\n c.execute(select_var)\n sql_data = c.fetchall()\n if (sql_data):\n #filecoursor = sql_data[0][0]\n return filename\n else:\n insert_var = \"INSERT INTO `{}` (filename) values ( '{}' )\".format(\"files\", os.path.basename(filename))\n c.execute(insert_var)\n filecoursor = c.lastrowid\n except Exception as ex:\n print(\"Error: {} when put file: {}\".format(ex, insert_var))\n return\n\n for tl in zipfilelist:\n mes_section_regexp = re.compile(\"^\\s+\\[((\\w+|\\s*)*)\\]\\n\", re.IGNORECASE)\n mes_parameter_regexp = re.compile(\"^((\\w+\\s+){1,3})\\s+\\[(.+?)\\]\\n\")\n mes_i002_regexp = re.compile(\".+?Parsing\\s+\\[i002\\].+?\\[(.+?)\\]\\n\")\n mes_datetime_regexp = re.compile(\"^(DEBUG|INFO)\\s+(\\d{4}\\/\\d{2}\\/\\d{2})\\s+(\\d{2}:\\d{2}:\\d{2})\\.\\d{3}\")\n\n parameter = \"\"\n outbound = False\n with open(os.path.join(tmpdir, tl)) as f:\n mess_dict = {}\n for line in f:\n if \"HandleInboundMsg exited\" in line or \"Outbound Msg SENT!\" in line:\n mes_datetime_search = mes_datetime_regexp.search(line)\n if mes_datetime_search:\n mess_dict.update({'datetime': \"{} {}\".format(mes_datetime_search.group(2),\n mes_datetime_search.group(3))})\n if outbound is True and mess_dict:\n write_mysql('small_table', mess_dict, 1, filecoursor, c)\n mess_dict.clear()\n elif outbound is False and mess_dict:\n write_mysql('small_table', mess_dict, 0, filecoursor, c)\n mess_dict.clear()\n\n if \"MSG RCVD in port\" in line:\n outbound = True\n continue\n elif \"MSG RCVD in queue\" in line:\n outbound = False\n continue\n mes_section_search = mes_section_regexp.search(line)\n mes_parameter_search = mes_parameter_regexp.search(line)\n mes_i002_search = mes_i002_regexp.search(line)\n if mes_section_search:\n parameter = mes_section_search.group(1)\n if mes_i002_search:\n mess_dict.update({'i002': mes_i002_search.group(1)})\n # print(line)\n if mes_parameter_search:\n mess_sel_dict = mess_dict.get(parameter, {})\n mess_sel_dict.update({mes_parameter_search.group(1).strip(): mes_parameter_search.group(3)})\n mess_dict.update({parameter: mess_sel_dict})\n # print (line)\n\n mariadb_connection.commit()\n mariadb_connection.close()\n return filename\n\n\n# @timer\ndef run_mp(filenames, findrrn=None):\n futures = []\n with concurrent.futures.ProcessPoolExecutor(max_workers=5) as executor:\n for cfile in filenames:\n #print(\"Processed file {}\".format(cfile))\n futures.append(executor.submit(mp_parse_archive, cfile))\n\n for future in concurrent.futures.as_completed(futures):\n try:\n out_file = future.result()\n print(\"File {} has been processed num {}\".format(out_file, future))\n except Exception as exc:\n print(\"Error parsing file {} : {}\".format(out_file, 
str(exc)))\n\n\ndef write_mysql(tablename, input_message, direction, filecoursor, coursor):\n # dt = datetime.strptime(\"{} {}\".format(input_message['Terminal']['TermDate'], input_message['Terminal']['TermTime']),'%Y%m%d %H%M%S')\n \"\"\"\n uk = []\n for i in input_message:\n if isinstance(i, dict):\n for k, v in i.items():\n if isinstance(v, dict):\n for uv in v.keys():\n fk = \"{}_{}\".format(k.replace(\" \", \"_\"), uv.replace(\" \", \"_\"))\n if fk not in uk:\n uk.append(fk)\n else:\n fk = \"{}\".format(k.replace(\" \", \"_\"))\n if fk not in uk:\n uk.append(fk)\n else:\n print(\"LOL\" + i)\n\n if not uk:\n print(\"Messages for tablename {} is empty\".format(tablename))\n return\n\n if isinstance(input_message, dict):\n # print(insert_var)\n\n for mes in input_message:\n kk = []\n kv = []\n\n # print (mes['Additional Data']['External ID'])\n # mes['Additional Data']['TraceNumber']\n # mes['Response']['ReferenceNumber']\n # mes['AccouintFrom']['CardNumber']\n for k, v in mes.items():\n if isinstance(v, dict):\n for lk, lv in v.items():\n fk = \"{}_{}\".format(k.replace(\" \", \"_\"), lk.replace(\" \", \"_\"))\n kk.append(fk)\n kv.append(lv.replace(\"'\", \"\"))\n else:\n fk = \"{}\".format(k.replace(\" \", \"_\"))\n kk.append(fk)\n kv.append(v.replace(\"'\", \"\"))\n # print(\"INSERT INTO \" + tablename + \" ( \" + ','.join(\"'{}'\".format(w) for w in kk) + \" ) VALUES ( \" + ', '.join(\"'{}'\".format(w) for w in kv) + \" )\")\n insert_var = \"INSERT INTO `\" + tablename + \"` ( \" + ','.join(\n \"`{}`\".format(w) for w in kk) + \" ) VALUES ( \" + ', '.join(\n \"'{}'\".format(w) for w in kv) + \" )\"\n # print(insert_var)\n c.execute(insert_var)\n \"\"\"\n\n if isinstance(input_message, dict):\n card_number = input_message.get('i002', '')\n if not card_number:\n if input_message['AccountFrom'].get('CardNumber'):\n card_number = input_message['AccountFrom']['CardNumber']\n else:\n card_number = input_message['AccountFrom']['AccntCardNumber']\n\n \"\"\"\n try:\n if card_number:\n select_var = \"SELECT id from `{}` where card_number = '{}'\".format(\"card_numbers\", card_number)\n coursor.execute(select_var)\n sql_data = coursor.fetchall()\n if (sql_data):\n cardcoursor = sql_data[0][0]\n else:\n insert_var = \"INSERT INTO `{}` (card_number) values ( '{}' )\".format(\"card_numbers\", card_number)\n coursor.execute(insert_var)\n cardcoursor = coursor.lastrowid\n else:\n select_var = \"SELECT card_number from `{}` where external_id = '{}' or rrn = '{}'\".format(\"small_table\",\n input_message[\n 'Additional Data'][\n 'External ID'], input_message['Response']['ReferenceNumber'])\n coursor.execute(select_var)\n sql_data = coursor.fetchall()\n if (sql_data):\n cardcoursor = sql_data[0][0]\n else:\n insert_var = \"INSERT INTO `{}` (card_number) values ( '{}' )\".format(\"card_numbers\", input_message['AccountFrom']['CardNumber'].replace('X','0'))\n coursor.execute(insert_var)\n cardcoursor = coursor.lastrowid\n print(\"Not found card_number for external_id: {}\".format(\n input_message['Additional Data']['External ID']))\n \n except Exception as ex:\n print(\"Error: {} when put file: {}\".format(ex, filecoursor))\n return\n \n \"\"\"\n \"\"\"DateTime\"\"\"\n if input_message['Terminal'].get('TermDate'):\n tdatetime = datetime.strptime(\n \"{} {}\".format(input_message['Terminal']['TermDate'], input_message['Terminal']['TermTime']),\n \"%Y%m%d %H%M%S\")\n else:\n tdatetime = datetime.strptime(\n \"{}\".format(input_message['datetime']), \"%Y/%m/%d %H:%M:%S\")\n try:\n insert_var = \"INSERT INTO 
`{}` ( `date`, direction, external_id, rrn, stan, card_number, filename, resp_code) values ( '{}', {}, '{}', '{}', '{}', '{}', '{}', '{}' )\".format(\n tablename, tdatetime.strftime('%Y-%m-%d %H:%M:%S'), direction,\n input_message['Additional Data']['External ID'],\n input_message['Response']['ReferenceNumber'], (input_message['Additional Data']).get('TraceNumber', ''),\n card_number, filecoursor, (input_message['Response']).get('OutCode', ''))\n\n coursor.execute(insert_var)\n except Exception as ex:\n print(\"Error: {} in string: {}\".format(ex, insert_var))\n\n\ndef _parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filetime\", action='append', nargs='*', default=[lambda: datetime.now().strftime('%d.%m.%Y')], help=\"Enter start filetime\")\n parser.add_argument('-d', default=-1, dest='days_to_put', help=\"days to put\", type=int)\n parser.add_argument('-t', dest='test', help='Use the TEST environment', action='store_true')\n parser.add_argument('-n', dest='clean', help='Do not clean the DB before start', action='store_false')\n parser.add_argument('-f', dest='find', help='String to find')\n parser.add_argument('-c', dest='conffile', default='config.ini', help='path to config file')\n\n return parser\n\ndef get_list_date(start_date, end_date):\n if end_date > start_date:\n deltedays = [start_date + timedelta(days=x) for x in range(0, (end_date-start_date).days+1)]\n else:\n deltedays = [end_date + timedelta(days=x) for x in range(0, (start_date - end_date).days+1)]\n return deltedays\n\ndef _main_parselogs(args):\n if hasattr(args, 'find'):\n findrrn = args.find\n\n if hasattr(args, 'clean'):\n clean = args.clean\n\n if hasattr(args, 'test'):\n test = args.test\n\n if hasattr(args, 'conffile'):\n conffile = args.conffile\n\n if test:\n env = _read_config(conffile, 'test')\n else:\n env = _read_config(conffile, 'prod')\n\n datelist = []\n\n\n if hasattr(args, 'days_to_put'):\n days_to_put = args.days_to_put\n\n if hasattr(args, 'filetime'):\n filetimes = args.filetime[1]\n\n\n datemas = []\n if len(filetimes) == 1:\n if callable(filetimes[0]):\n first_date = datetime.strptime(filetimes[0](), '%d.%m.%Y')\n else:\n first_date = datetime.strptime(filetimes[0], '%d.%m.%Y')\n if days_to_put:\n second_date = roundTime(roundTo=days_to_put, dt=first_date)\n datemas = get_list_date(first_date, second_date)\n\n elif len(filetimes) == 2:\n datemas = get_list_date(datetime.strptime(filetimes[0], '%d.%m.%Y'), datetime.strptime(filetimes[1], '%d.%m.%Y'))\n elif len(filetimes) > 2:\n for ctime in filetimes:\n datemas.append(datetime.strptime(ctime, '%d.%m.%Y'))\n\n filespath = get_file(datemas, env)\n\n \"\"\"\n for filepath in filespath:\n if os.path.exists(filepath):\n print(\"File to processing {}\".format(filepath))\n run_mp([filepath], findrrn)\n os.remove(filepath)\n \"\"\"\n\n\nif __name__ == '__main__':\n _main_parselogs(_parse_arguments().parse_args())\n","sub_path":"small_log_mysql.py","file_name":"small_log_mysql.py","file_ext":"py","file_size_in_byte":16576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
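`write_mysql` and `mp_parse_archive` above build their INSERT/SELECT statements with `str.format`, which breaks on values containing quotes (the code strips `'` by hand) and is open to SQL injection. A sketch of the parameterized form that MySQLdb supports; the connection arguments mirror the script's own and are assumptions about the local setup:

```python
import MySQLdb as mariadb

conn = mariadb.connect(user='root', password='', database='online', host='localhost')
cur = conn.cursor()
# The driver escapes the value itself, so quotes inside filenames are safe.
cur.execute("INSERT INTO `files` (filename) VALUES (%s)",
            ("traces.2019.07.13.0.zip",))
conn.commit()
conn.close()
```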
+{"seq_id":"199581979","text":"import os\nimport pickle\n\nimport auth\nimport tbap.api as api\n\n\ndef store_local_data():\n sn_json = api.Session(auth.username, auth.key, season=2017,\n data_format=\"json\")\n sn_xml = api.Session(auth.username, auth.key, season=2017,\n data_format=\"xml\")\n\n os.chdir(\"C:/Users/stacy/OneDrive/Projects/FIRST_API/tbap/data\")\n\n season_json = api.get_season(sn_json)\n with open(\"season_json.pickle\", \"wb\") as f:\n pickle.dump(season_json, f, pickle.HIGHEST_PROTOCOL)\n\n season_xml = api.get_season(sn_xml)\n with open(\"season_xml.pickle\", \"wb\") as f:\n pickle.dump(season_xml, f, pickle.HIGHEST_PROTOCOL)\n\n status_json = api.get_status(sn_json)\n with open(\"status_json.pickle\", \"wb\") as f:\n pickle.dump(status_json, f, pickle.HIGHEST_PROTOCOL)\n\n status_xml = api.get_status(sn_xml)\n with open(\"status_xml.pickle\", \"wb\") as f:\n pickle.dump(status_xml, f, pickle.HIGHEST_PROTOCOL)\n\n districts_json = api.get_districts(sn_json)\n with open(\"districts_json.pickle\", \"wb\") as f:\n pickle.dump(districts_json, f, pickle.HIGHEST_PROTOCOL)\n\n districts_xml = api.get_districts(sn_xml)\n with open(\"districts_xml.pickle\", \"wb\") as f:\n pickle.dump(districts_xml, f, pickle.HIGHEST_PROTOCOL)\n\n events_json = api.get_events(sn_json, district =\"PNW\")\n with open(\"events_json.pickle\", \"wb\") as f:\n pickle.dump(events_json, f, pickle.HIGHEST_PROTOCOL)\n\n events_xml = api.get_events(sn_xml, district =\"PNW\")\n with open(\"events_jxml.pickle\", \"wb\") as f:\n pickle.dump(events_xml, f, pickle.HIGHEST_PROTOCOL)\n\n teams_json = api.get_teams(sn_json, district =\"PNW\")\n with open(\"teams_json.pickle\", \"wb\") as f:\n pickle.dump(teams_json, f, pickle.HIGHEST_PROTOCOL)\n\n teams_xml = api.get_teams(sn_xml, district =\"PNW\")\n with open(\"teams_xml.pickle\", \"wb\") as f:\n pickle.dump(teams_xml, f, pickle.HIGHEST_PROTOCOL)","sub_path":"data/pickel_data.py","file_name":"pickel_data.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"119905903","text":"APP_NAME = 'AWS SOC'\nAPP_VERSION = '1.0.0'\n\nDATA_DIR = '/data'\nHOSTS_DIR = '{}/hosts'.format(DATA_DIR)\nDATABASE_DIR = '{}/database'.format(DATA_DIR)\nHOST_FILENAME = 'host.json'\nQID_FILENAME = 'qid.json'\nTOOLS_FILENAME = 'tools.json'\n\nTOOLS_LIST = ['tanium', 'qualys', 'splunk']\nTOOLS_COUNT = 3\nDEFAULT_TOOLS_STATUS = 'N/A'\nREGION = 'eu-west-2'\n\nHEADER = [\"status\", \"name\", \"ip\", \"os\", \"tanium\", \"qualys\", \"splunk\", \"qid\", \"last check\"]\nQ_HEADER = [\"host\", \"qid\", \"description\", \"solution\"]\n","sub_path":"prod-config.py","file_name":"prod-config.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"220805657","text":"import numpy as np\n\nclass SMO:\n def __init__(self, C, tol, kernel='rbf', gamma=None):\n # 惩罚系数\n self.C = C\n # 优化过程中alpha步进阈值\n self.tol = tol\n\n # 核函数\n if kernel == 'rbf':\n self.K = self._gaussian_kernel\n self.gamma = gamma\n else:\n self.K = self._linear_kernel\n\n def _gaussian_kernel(self, U, v):\n '''高斯核函数'''\n if U.ndim == 1:\n p = np.dot(U - v, U - v)\n else:\n p = np.sum((U - v) * (U - v), axis=1)\n return np.exp(-self.gamma * p)\n \n def _linear_kernel(self, U, v):\n '''线性核函数'''\n return np.dot(U, v)\n\n def _g(self, x):\n '''函数g(x)'''\n alpha, b, X, y, E = self.args\n\n idx = np.nonzero(alpha > 0)[0]\n if idx.size > 0:\n return np.sum(y[idx] * alpha[idx] * self.K(X[idx], x)) + b[0]\n return b[0]\n\n def _optimize_alpha_i_j(self, i, j):\n '''优化alpha_i, alpha_j'''\n alpha, b, X, y, E = self.args\n C, tol, K = self.C, self.tol, self.K\n\n # 优化需有两个不同alpha\n if i == j:\n return 0\n\n # 计算alpha[j]的边界\n if y[i] != y[j]:\n L = max(0, alpha[j] - alpha[i])\n H = min(C, C + alpha[j] - alpha[i])\n else:\n L = max(0, alpha[j] + alpha[i] - C)\n H = min(C, alpha[j] + alpha[i])\n\n # L == H 时已无优化空间(一个点).\n if L == H:\n return 0\n\n # 计算eta\n eta = K(X[i], X[i]) + K(X[j], X[j]) - 2 * K(X[i], X[j])\n if eta <= 0:\n return 0\n\n # 对于alpha非边界, 使用E缓存. 边界alpha, 动态计算E.\n if 0 < alpha[i] < C:\n E_i = E[i]\n else:\n E_i = self._g(X[i]) - y[i]\n\n if 0 < alpha[j] < C:\n E_j = E[j]\n else:\n E_j = self._g(X[j]) - y[j]\n \n # 计算alpha_j_new\n alpha_j_new = alpha[j] + y[j] * (E_i - E_j) / eta\n\n # 对alpha_j_new进行剪辑\n if alpha_j_new > H:\n alpha_j_new = H\n elif alpha_j_new < L:\n alpha_j_new = L\n alpha_j_new = np.round(alpha_j_new, 7)\n\n # 判断步进是否足够大\n if np.abs(alpha_j_new - alpha[j]) < tol * (alpha_j_new + alpha[j] + tol):\n return 0\n\n # 计算alpha_i_new\n alpha_i_new = alpha[i] + y[i] * y[j] * (alpha[j] - alpha_j_new)\n alpha_i_new = np.round(alpha_i_new, 7)\n\n # 计算b_new\n b1 = b[0] - E_i \\\n -y[i] * (alpha_i_new - alpha[i]) * K(X[i], X[i]) \\\n -y[j] * (alpha_j_new - alpha[j]) * K(X[i], X[j])\n\n b2 = b[0] - E_j \\\n -y[i] * (alpha_i_new - alpha[i]) * K(X[i], X[j]) \\\n -y[j] * (alpha_j_new - alpha[j]) * K(X[j], X[j])\n\n if 0 < alpha_i_new < C:\n b_new = b1\n elif 0 < alpha_j_new < C:\n b_new = b2\n else:\n b_new = (b1 + b2) / 2\n\n # 更新E缓存\n # 更新E[i],E[j]. 若优化后alpha若不在边界, 缓存有效且值为0.\n E[i] = E[j] = 0\n # 更新其他非边界alpha对应的E[k]\n mask = (alpha != 0) & (alpha != C)\n mask[i] = mask[j] = False\n non_bound_idx = np.nonzero(mask)[0]\n for k in non_bound_idx:\n E[k] += b_new - b[0] + y[i] * K(X[i], X[k]) * (alpha_i_new - alpha[i]) \\\n + y[j] * K(X[j], X[k]) * (alpha_j_new - alpha[j])\n\n # 更新alpha_i, alpha_i\n alpha[i] = alpha_i_new\n alpha[j] = alpha_j_new\n\n # 更新b\n b[0] = b_new\n\n return 1\n\n def _optimize_alpha_i(self, i):\n '''优化alpha_i, 内部寻找alpha_j.'''\n alpha, b, X, y, E = self.args\n\n # 对于alpha非边界, 使用E缓存. 
边界alpha, 动态计算E.\n if 0 < alpha[i] < self.C:\n E_i = E[i]\n else:\n E_i = self._g(X[i]) - y[i]\n\n # alpha_i仅在违反KKT条件时进行优化.\n if (E_i * y[i] < -self.tol and alpha[i] < self.C) or \\\n (E_i * y[i] > self.tol and alpha[i] > 0):\n # 按优先级次序选择alpha_j.\n\n # 分别获取非边界alpha和边界alpha的索引\n mask = (alpha != 0) & (alpha != self.C)\n non_bound_idx = np.nonzero(mask)[0]\n bound_idx = np.nonzero(~mask)[0]\n\n # 优先级(-1)\n # 若非边界alpha个数大于1, 寻找使得|E_i - E_j|最大化的alpha_j.\n if len(non_bound_idx) > 1:\n if E[i] > 0:\n j = np.argmin(E[non_bound_idx])\n else:\n j = np.argmax(E[non_bound_idx])\n\n if self._optimize_alpha_i_j(i, j):\n return 1\n\n # 优先级(-2)\n # 随机迭代非边界alpha\n np.random.shuffle(non_bound_idx)\n for j in non_bound_idx:\n if self._optimize_alpha_i_j(i, j):\n return 1\n\n # 优先级(-3)\n # 随机迭代边界alpha\n np.random.shuffle(bound_idx)\n for j in bound_idx:\n if self._optimize_alpha_i_j(i, j):\n return 1\n\n return 0\n\n def train(self, X_train, y_train):\n '''训练'''\n m, _ = X_train.shape\n\n # 初始化向量alpha和标量b\n alpha = np.zeros(m)\n b = np.zeros(1)\n\n # 创建E缓存\n E = np.zeros(m)\n\n # 将各方法频繁使用的参数收集到列表, 供调用时传递.\n self.args = [alpha, b, X_train, y_train, E]\n\n n_changed = 0\n examine_all = True\n while n_changed > 0 or examine_all:\n n_changed = 0\n\n # 迭代alpha_i\n for i in range(m):\n if examine_all or 0 < alpha[i] < self.C:\n n_changed += self._optimize_alpha_i(i)\n\n print('n_changed: %s' % n_changed)\n print('sv num: %s' % np.count_nonzero((alpha > 0) & (alpha < self.C)))\n\n # 若当前迭代非边界alpha, 且没有alpha改变, 下次迭代所有alpha.\n # 否则, 下次迭代非边界间alpha.\n examine_all = (not examine_all) and (n_changed == 0)\n\n # 训练完成后保存模型参数: \n idx = np.nonzero(alpha > 0)[0]\n # 1.非零alpha \n self.sv_alpha = alpha[idx] \n # 2.支持向量, \n self.sv_X = X_train[idx]\n self.sv_y = y_train[idx]\n # 3.b.\n self.sv_b = b[0]\n\n def _predict_one(self, x):\n '''对单个输入进行预测'''\n k = self.K(self.sv_X, x)\n return np.sum(self.sv_y * self.sv_alpha * k) + self.sv_b\n\n def predict(self, X_test):\n '''预测'''\n y_pred = np.apply_along_axis(self._predict_one, 1, X_test)\n return np.squeeze(np.where(y_pred > 0, 1., -1.))\n","sub_path":"book-code/机器学习-numpy实现代码/6.支持向量机/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":6946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
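A usage sketch for the SMO class above on a toy, linearly separable problem; the C/tol/gamma values are arbitrary choices, not tuned:

```python
import numpy as np

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2) + 2, rng.randn(20, 2) - 2])  # two well-separated blobs
y = np.hstack([np.ones(20), -np.ones(20)])                   # labels in {+1, -1}

clf = SMO(C=1.0, tol=1e-3, kernel='rbf', gamma=0.5)
clf.train(X, y)
print('train accuracy:', (clf.predict(X) == y).mean())
```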
+{"seq_id":"114998291","text":"__author__ = 'greg'\r\n\r\nfrom django.core.management.base import BaseCommand, CommandError\r\nfrom dealfinder.models import Card, CardSet, Product\r\nimport requests\r\nimport sys\r\nfrom django.db import models\r\nfrom dealfinder import productFetcher\r\n\r\n\r\nclass Command(BaseCommand):\r\n\tTCG_PARTNER_KEY = 'DEALFINDER'\r\n\r\n\tdef handle(self, *args, **options):\r\n\t\tcardSets = CardSet.objects.all()\r\n\t\tdepthcount = 15\r\n\t\tfor set in cardSets:\r\n\t\t\tif (depthcount < 1):\r\n\t\t\t\tbreak\r\n\t\t\tprint(unicode(set.name).encode('utf-8'))\r\n\t\t\tcardIds = set.getCardIds()\r\n\t\t\tfor cardId in cardIds:\r\n\t\t\t\tcard = None\r\n\t\t\t\tprint(cardId)\r\n\t\t\t\ttry:\r\n\t\t\t\t\tcard = Card.objects.get(multiverseId=cardId)\r\n\t\t\t\texcept:\r\n\t\t\t\t\t#print(\"Error: \", sys.exc_info()[0])\r\n\t\t\t\t\t#raise\r\n\t\t\t\t\tprint('Card ' + cardId + ' Does not Exist')\r\n\t\t\t\tif (card != None):\r\n\t\t\t\t\tprint((set.name + ' : ' + card.name).encode('utf-8'))\r\n\t\t\t\t\treq = requests.get(self.generateTcgUrl(set.name, card.name))\r\n\t\t\t\t\tproduct = productFetcher.productFromXmlString(req.text, card.name)\r\n\t\t\t\t\tif (product != None):\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tprint(\"Product Exists! Updating with latest\")\r\n\t\t\t\t\t\t\tprint(\"tcgId: \" + product.tcgId)\r\n\t\t\t\t\t\t\tproductTcgId = product.tcgId\r\n\t\t\t\t\t\t\texistingProduct = Product.objects.get(tcgId=product.tcgId)\r\n\t\t\t\t\t\t\tprint(\"GotHere1!\")\r\n\t\t\t\t\t\t\texistingProduct.updateFieldsFromProduct(product)\r\n\t\t\t\t\t\t\texistingProduct.save()\r\n\t\t\t\t\t\t\tprint(\"GotHere2!\")\r\n\t\t\t\t\t\t\tif (card.product == None):\r\n\t\t\t\t\t\t\t\tcard.product = existingProduct\r\n\t\t\t\t\t\t\t\tcard.save()\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t# print \"Unexpected error:\", sys.exc_info()[0]\r\n\t\t\t\t\t\t\t#raise\r\n\t\t\t\t\t\t\tprint(\"No Product Exists! Updating Product\")\r\n\t\t\t\t\t\t\tproduct.save()\r\n\t\t\t\t\t\t\tcard.product = product\r\n\t\t\t\t\t\t\tcard.save()\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint(\"The Product Was missing from TCG Player or was malformed\")\r\n\t\t\tdepthcount -= 1\r\n\r\n\tdef generateTcgUrl(self, setName, cardName):\r\n\t\trequestString = 'http://partner.tcgplayer.com/x3/phl.asmx/p?pk=' + self.TCG_PARTNER_KEY + '&s=' + setName + '&p=' + cardName\r\n\t\treturn requestString\r\n","sub_path":"backend/dealfinder/management/commands/tcgToProductCommand.py","file_name":"tcgToProductCommand.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"499938777","text":"from lcoe.lcoe import lcoe\nimport numpy as np\nfrom numpy_financial import irr\nimport matplotlib.pyplot as plt\n\noperating_cost = 1000 # $million/year\ncapital_cost = 50000 # $million\ndiscount_rate = 0.02 # %\nlifetime = 25\nannual_output = 25000 # kWh\n\ncapital_cost = [50, 100, 200, 400]\noperating_costs = [5, 10, 20, 40]\n\n\ncosts = [capital_cost, operating_costs, discount_rate]\n\nfor cost in costs:\n list_lcoe = []\n list_irr = []\n #figure = 1\n for n in cost:\n\n lcoe_value = lcoe(annual_output, n, operating_cost, discount_rate, lifetime)\n print(\"lcoe is:\", lcoe_value)\n if lcoe_value < 0.5:\n print(\"profits per kWh:\", 0.5 - lcoe_value)\n else:\n print(\"lcoe is above market price\")\n lcoe_profit: float = (0.5 - lcoe_value) * annual_output\n y = 1 + discount_rate\n print(\"annual revenue is:\", lcoe_profit)\n\n irr_input = np.zeros(25)\n irr_input[0] = - n\n\n for j in range(1, 25):\n irr_input[j] = lcoe_profit/y**j\n\n irr_output = irr(irr_input)\n\n print(\"irr is:\", irr_output)\n\n list_lcoe.append(lcoe_value)\n list_irr.append(irr_output)\n\n\n\n #plt.plot([capital_cost], [irr_output], 'ro')\n #plt.plot([50000, 100000, 200000, 40000], [0.14244087683478937, 0.24488175366957873, 0.44976350733915743, 0.12195270146783148], 'ro')\n #plt.ylabel('lcoe')\n #plt.xlabel('Capital Cost')\n #plt.show()\n print(list_lcoe)\n print(list_irr)\n plot = plt.figure()\n plt.plot(cost, list_irr)\n #figure = figure + 1\n plt.show()\n\n\"\"\"\nclass Test():\n\n def funct1(self):\n pass\n def func2(self):\n pass\n\"\"\"","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"616057866","text":"# -*- coding: utf-8 -*-\n###################################################\n# LOCAL import\n###################################################\nfrom Plugins.Extensions.IPTVPlayer.components.iptvplayerinit import TranslateTXT as _, SetIPTVPlayerLastHostError\nfrom Plugins.Extensions.IPTVPlayer.tools.iptvtools import printDBG, printExc\nfrom Plugins.Extensions.IPTVPlayer.libs.urlparser import urlparser\n###################################################\n\n###################################################\n# FOREIGN import\n###################################################\nimport re\n###################################################\n\ndef ParseM3u(data):\n printDBG('ParseM3u')\n list = []\n data = data.replace(\"\\r\",\"\\n\").replace('\\n\\n', '\\n').split('\\n')\n printDBG(\">>>>>>>>>>>>> data0[%s]\" % data[0])\n if '#EXTM3U' not in data[0]:\n return list\n \n params = {'title':'', 'length':'', 'uri':''}\n for line in data:\n line = line.strip()\n printDBG(line)\n if line.startswith('#EXTINF:'):\n try:\n length, title = line.split('#EXTINF:')[1].split(',', 1)\n params = {'title':title, 'length':length, 'uri':''}\n except Exception:\n printExc()\n params = {'title':'', 'length':'', 'uri':''}\n else:\n if '' != params['title']:\n line = line.replace('rtmp://$OPT:rtmp-raw=', '')\n cTitle = re.sub('\\[[^\\]]*?\\]', '', params['title'])\n if len(cTitle): params['title'] = cTitle\n params['uri'] = urlparser.decorateParamsFromUrl(line)\n list.append(params)\n params = {'title':'', 'length':'', 'uri':''}\n return list","sub_path":"IPTVPlayer/libs/m3uparser.py","file_name":"m3uparser.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"72751437","text":"from flask import Blueprint, session, redirect, url_for\nfrom flask.ext.mako import render_template\n\nmod = Blueprint('events', __name__, url_prefix='/events')\n\nrelative_location = '..'\n\n\n@mod.route('/santa')\ndef santa_form():\n url = \"https://docs.google.com/forms/d/1g4_yI1z1I183OXaUqe1EyMiXW7vtDovZgVQCTHGib1Y/viewform\"\n parameters = {\n 'title': 'Secret Santa',\n 'url': url,\n 'relative_location': relative_location}\n return render_template('external_form.mako', **parameters)\n\n\n@mod.route('/santa/setup')\ndef santa_setup():\n parameters = {\n 'title': 'Santa Amazon WishList Setup Instructions',\n 'relative_location': '../..'\n }\n return render_template('events/amazon.mako', **parameters)\n","sub_path":"osalt/views/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"637361263","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see \n#\n# @author : pascal.fautrero@ac-versailles.fr\n\n\n#import os, shutil\nimport inkex\nimport tempfile\nimport Tkinter\nimport os\nimport ConfigParser\nfrom xiaconverter.mainwindow import IADialog\nfrom xiaconverter.loggerinkscape import LoggerInkscape\n\nclass ImageActive(inkex.Effect):\n def __init__(self):\n inkex.Effect.__init__(self)\n\n def effect(self):\n\n # fix inkscape bug\n # https://bugs.launchpad.net/ubuntu/+source/inkscape/+bug/944077/comments/11\n pathNodes = self.document.xpath('//sodipodi:namedview',namespaces=inkex.NSS)\n pathNodes[0].set('id','base')\n\n # workaround - fix path according to working dir\n # inkscape 0.47 extensions working dir is inkscape/\n # inkscape 0.48 extensions working dir is inkscape/share/extensions\n\n inkexWorkingDir = \".\"\n if not os.getcwd().endswith(\"extensions\"):\n inkexWorkingDir = \"share/extensions\"\n\n # retrieve paths\n\n config = ConfigParser.ConfigParser()\n config.read(inkexWorkingDir + \"/xia.cnf\")\n numVersion = config.get('version', 'numVersion')\n releaseVersion = config.get('version', 'releaseVersion')\n imagesPath = inkexWorkingDir + \"/\" + config.get('paths', 'imagesPath')\n langPath = inkexWorkingDir + \"/\" + config.get('paths', 'langPath')\n fontsPath = inkexWorkingDir + \"/\" + config.get('paths', 'fontsPath')\n themesPath = inkexWorkingDir + \"/\" + config.get('paths', 'themesPath')\n labjsLib = inkexWorkingDir + \"/\" + config.get('paths', 'labjsLib')\n jqueryLib = inkexWorkingDir + \"/\" + config.get('paths', 'jqueryLib')\n kineticLib = inkexWorkingDir + \"/\" + config.get('paths', 'kineticLib')\n sha1Lib = inkexWorkingDir + \"/\" + config.get('paths', 'sha1Lib')\n\n try:\n\n filePath = tempfile.mkdtemp() + \"/\" + \"temp.svg\"\n with open(filePath,\"w\") as file:\n self.document.write(filePath)\n\n console = LoggerInkscape()\n\n root = Tkinter.Tk()\n root.title(\"XIA \" + numVersion + releaseVersion)\n root.geometry(\"465x310\")\n root.resizable(0,0)\n img = Tkinter.PhotoImage(file= imagesPath + '/xia64.gif')\n root.tk.call('wm', 'iconphoto', root._w, img)\n maindialog = IADialog(root, console, langPath, imagesPath, themesPath, fontsPath, labjsLib, jqueryLib,\n kineticLib, sha1Lib, filePath)\n maindialog.pack(side=\"left\")\n root.mainloop()\n\n except ValueError:\n #inkex.debug(ValueError)\n pass\n\nia = ImageActive()\nia.affect()\n","sub_path":"src/inkscape-plugin/xia.py","file_name":"xia.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"253645084","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pprint\nimport datetime\nimport csv\nfrom dateutil.relativedelta import relativedelta\nfrom decouple import config\n\ntestcase = []\nkey = config('MOVIE_KEY')\ntargetDt = '20190713'\nbase_url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchWeeklyBoxOfficeList.json?key={key}&targetDt={targetDt}&weekGb=0'\n# pprint.pprint(response)\nwith open('boxoffice.csv','w', encoding='utf-8', newline='') as f:\n fieldnames = ['movieCd', 'movieNm', 'audiAcc']\n csv_writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')\n csv_writer.writeheader()\n for date_p in range(1, 51):\n proc_date = datetime.datetime.strptime(targetDt, '%Y%m%d')\n proc_date = proc_date - relativedelta(weeks=1)\n targetDt = proc_date.strftime('%Y%m%d')\n base_url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchWeeklyBoxOfficeList.json?key={key}&targetDt={targetDt}&weekGb=0'\n response = requests.get(base_url).json()\n print(response)\n for i in range(10):\n if response['boxOfficeResult']['weeklyBoxOfficeList'][i]['movieCd'] not in testcase:\n csv_writer.writerow(response['boxOfficeResult']['weeklyBoxOfficeList'][i])\n testcase.append(response['boxOfficeResult']['weeklyBoxOfficeList'][i]['movieCd'])\n\n \n\n","sub_path":"01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"193272329","text":"from datetime import datetime\n\nfrom django.core.exceptions import PermissionDenied\n\nfrom .models import Invitations\n\n\nALLOWED_SIGNUP_DOMAINS = [\n 'getpocket.com', 'mozilla.com', 'mozillafoundation.org'\n]\n\n\ndef invitations_only(sender, **kwargs):\n sociallogin = kwargs['sociallogin']\n email = sociallogin.account.extra_data['email']\n domain_part = email.split('@')[1]\n for allowed_domain in ALLOWED_SIGNUP_DOMAINS:\n if domain_part == allowed_domain:\n return True\n try:\n active_invitation = Invitations.objects.get(email=email, active=True)\n except Invitations.DoesNotExist:\n raise PermissionDenied\n if not active_invitation.date_redeemed:\n active_invitation.date_redeemed = datetime.now()\n active_invitation.save()\n return True\n","sub_path":"privaterelay/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"59193340","text":"# !/usr/bin/env python\n\n#\n# Copyright (c) 2018-2019 Intel Corporation\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see .\n#\n\n\"\"\"\nTool Function used to design the Feature Histograms\n\"\"\"\n\n# ------------------------\n# IMPORTS\n# ------------------------\n\nimport numpy\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MaxAbsScaler\n\n# ------------------------\n# Histograms Function\n# ------------------------\n\n\ndef histograms(dataset):\n X = dataset[:, 0:11]\n Y = dataset[:, 11]\n features = []\n\n X = MaxAbsScaler().fit_transform(X)\n\n class0 = []\n class1 = []\n class2 = []\n idx = 0\n\n kwargs = dict(histtype='bar', alpha=1, bins='auto', color='navy')\n plt.hist(Y, **kwargs)\n plt.title('Quality')\n plt.ylabel('Counts')\n plt.xlabel('Value')\n plt.grid(axis='y', alpha=0.75)\n plt.show()\n\n for feature in range(0, 11):\n idx = 0\n class0 = []\n class1 = []\n class2 = []\n for sample in Y:\n if 0 <= sample <= 4:\n class0.append(X[idx, feature])\n elif 5 <= sample <= 6:\n class1.append(X[idx, feature])\n else:\n class2.append(X[idx, feature])\n idx += 1\n\n kwargs0 = dict(histtype='bar', alpha=0.5, bins='auto', color='red')\n kwargs1 = dict(histtype='bar', alpha=0.5, bins='auto', color='yellow')\n kwargs2 = dict(histtype='bar', alpha=0.5, bins='auto', color='navy')\n\n # design plot\n plt.hist(class0, **kwargs0)\n plt.hist(class1, **kwargs1)\n plt.hist(class2, **kwargs2)\n plt.title('Feature ' + str(feature + 1) + \": \" + features[feature])\n plt.ylabel('Counts')\n plt.xlabel('Value')\n plt.grid(axis='y', alpha=0.75)\n plt.legend((\"Classification: 0\", \"Classification: 1\", \"Classification: 2\"))\n plt.show()\n\n\nif __name__ == '__main__':\n dataset = numpy.loadtxt(\"../csv/dataset.csv\", delimiter=\";\", skiprows=1)\n histograms(dataset)\n\n","sub_path":"first_project/feature_histograms/histograms.py","file_name":"histograms.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"568991942","text":"# Authors: Hamza Tazi Bouardi (htazi@mit.edu), Michael L. Li (mlli@mit.edu), Omar Skali Lami (oskali@mit.edu)\nfrom datetime import datetime\n\n# Default parameters\ndate_MATHEMATICA = \"2020-05-07\" # Transition date from Mathematica to Python\ndefault_parameter_list = [1, 0, 2, 0.2, 0.05, 0.2, 3, 3, 0.1, 3, 1] # Default parameters for the solver\ndefault_bounds_params = (\n (0.75, 1.25), (-10, 10), (1, 3), (0.05, 0.5), (0.01, 0.25), (0, 0.5), (0.1, 10), (0.1, 10),(0,5), (0,7),(0.1,5)\n ) # Bounds for the solver\nvalidcases_threshold = 7 # Minimum number of cases to fit the base-DELPHI\nvalidcases_threshold_policy = 15 # Minimum number of cases to train the country-level policy predictions\nmax_iter = 500 # Maximum number of iterations for the algorithm\n\n# Initial condition of exposed state and infected state\nIncubeD = 5\nRecoverID = 10\nRecoverHD = 15\nDetectD = 2\nVentilatedD = 10 # Recovery Time when Ventilated\ndefault_maxT = datetime(2021,1,15) # Maximum timespan of prediction\nn_params_without_policy_params = 7 # alpha, r_dth, p_dth, a, b, k1, k2\np_v = 0.25 # Percentage of ventilated\np_d = 0.2 # Percentage of infection cases detected.\np_h = 0.15 # Percentage of detected cases hospitalized\n\n# Policies and future times for counterfactual predictions\nfuture_policies = [\n 'No_Measure', 'Restrict_Mass_Gatherings', 'Mass_Gatherings_Authorized_But_Others_Restricted',\n 'Restrict_Mass_Gatherings_and_Schools', 'Authorize_Schools_but_Restrict_Mass_Gatherings_and_Others',\n 'Restrict_Mass_Gatherings_and_Schools_and_Others', 'Lockdown'\n]\ndefault_maxT_policies = datetime(2020, 12, 15) # Maximum timespan of prediction under different policy scenarios\nfuture_times = [0, 7, 14, 28, 42]\n\n# Additional utils inputs\nTIME_DICT = {0: \"Now\", 7: \"One Week\", 14: \"Two Weeks\", 28: \"Four Weeks\", 42: \"Six Weeks\"}\nMAPPING_STATE_CODE_TO_STATE_NAME ={\n 'AL': 'Alabama', 'AK': 'Alaska', 'AZ': 'Arizona', 'AR': 'Arkansas', 'CA': 'California',\n 'CO': 'Colorado', 'CT': 'Connecticut', 'DE': 'Delaware', 'DC': 'District of Columbia',\n 'FL': 'Florida', 'GA': 'Georgia', 'HI': 'Hawaii', 'ID': 'Idaho', 'IL': 'Illinois',\n 'IN': 'Indiana', 'IA': 'Iowa', 'KS': 'Kansas', 'KY': 'Kentucky', 'LA': 'Louisiana',\n 'ME': 'Maine', 'MD': 'Maryland', 'MA': 'Massachusetts', 'MI': 'Michigan',\n 'MN': 'Minnesota', 'MS': 'Mississippi', 'MO': 'Missouri', 'MT': 'Montana',\n 'NE': 'Nebraska', 'NV': 'Nevada', 'NH': 'New Hampshire', 'NJ': 'New Jersey',\n 'NM': 'New Mexico', 'NY': 'New York', 'NC': 'North Carolina', 'ND': 'North Dakota',\n 'OH': 'Ohio', 'OK': 'Oklahoma', 'OR': 'Oregon', 'PA': 'Pennsylvania',\n 'RI': 'Rhode Island', 'SC': 'South Carolina', 'SD': 'South Dakota', 'TN': 'Tennessee',\n 'TX': 'Texas', 'UT': 'Utah', 'VT': 'Vermont', 'VA': 'Virginia', 'WA': 'Washington',\n 'WV': 'West Virginia', 'WI': 'Wisconsin', 'WY': 'Wyoming', \"AS\": \"American Samoa\",\n \"GU\": \"Guam\", \"MP\": \"Northern Marianas\", \"PR\": \"Puerto Rico\", \"VI\": \"Virgin Islands\"\n}\ndefault_policy = \"Lockdown\" # Eventually change to future_policies[-1]\ndefault_policy_enaction_time = 'Now' # Eventually change to TIME_DICT[0]\n","sub_path":"CDC/DELPHI_params_CDC.py","file_name":"DELPHI_params_CDC.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"259341358","text":"# \"\"\"\n# This is the interface that allows for creating nested lists.\n# You should not implement it, or speculate about its implementation\n# \"\"\"\n#class NestedInteger(object):\n# def isInteger(self):\n# \"\"\"\n# @return True if this NestedInteger holds a single integer, rather than a nested list.\n# :rtype bool\n# \"\"\"\n#\n# def getInteger(self):\n# \"\"\"\n# @return the single integer that this NestedInteger holds, if it holds a single integer\n# Return None if this NestedInteger holds a nested list\n# :rtype int\n# \"\"\"\n#\n# def getList(self):\n# \"\"\"\n# @return the nested list that this NestedInteger holds, if it holds a nested list\n# Return None if this NestedInteger holds a single integer\n# :rtype List[NestedInteger]\n# \"\"\"\n\nclass Solution(object):\n def depthSum_recur(self, nestedList):\n \"\"\"\n :type nestedList: List[NestedInteger]\n :rtype: int\n \"\"\"\n def nestDFS(nest, n):\n nestsum = 0\n for i in nest:\n nestsum += n*i.getInteger() if i.isInteger() else nestDFS(i.getList(), n+1)\n return nestsum\n return nestDFS(nestedList, 1)\n\n def depthSum(self, nestedList):\n \"\"\"\n :type nestedList: List[NestedInteger]\n :rtype: int\n \"\"\"\n ans = 0\n recurlist = [(nestedList, 1)]\n while recurlist:\n nest, n = recurlist.pop()\n for i in nest:\n if i.isInteger():\n ans += n*i.getInteger()\n else:\n recurlist.append((i.getList(), n+1))\n return ans","sub_path":"Nested_List_Weight_Sum.py","file_name":"Nested_List_Weight_Sum.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"462121422","text":"class Solution:\n def trap(self, height: List[int]) -> int:\n left=0\n right=len(height)-1\n ans=0\n left_max=0\n right_max=0\n while left=left_max:\n left_max=height[left]\n else:\n ans+=(left_max-height[left])\n left+=1\n else:\n if height[right]>=right_max:\n right_max=height[right]\n else:\n ans+=(right_max-height[right])\n right-=1\n return ans\n#Time-Complexity: O(n)\n#Space-complexity: O(1)","sub_path":"rain_water.py","file_name":"rain_water.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"362306297","text":"# Create a function that takes a list as a parameter,\n# and returns a new list with all it's element value doubled.\n# It should raise an error if the parameter is not a list\n\n\nn = [1, 2, 3, 4, 5]\n# n = ''\n\ndef double_list(input_list):\n testList = []\n if type(input_list) != type(testList):\n raise Error\n newList = []\n for item in input_list:\n item *= 2\n newList.append(item)\n return newList\n\n\nprint(double_list(n))\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"10730546","text":"arquivo = 'quemsoueu_transcricao.fasta'\r\n\r\nf = open(arquivo, 'r')\r\nlines = f.readlines()\r\n\r\nrelist = []\r\n\r\nfor line in lines:\r\n if line.find('>') == 0:\r\n continue\r\n \r\n relist.append(line)\r\n \r\ndados = [0, 0, 0, 0]\r\ni = 0\r\n\r\nLista = []\r\ncount = 1\r\n\r\ndef transcreve(num):\r\n RNA = \"\"\r\n i = 0\r\n teste = relist[num]\r\n while i < len(teste):\r\n if(teste[i] == \"G\"):\r\n RNA += \"C\"\r\n dados[0] += 1\r\n elif(teste[i] == \"C\"):\r\n RNA += \"G\"\r\n dados[1] += 1\r\n elif(teste[i] == \"T\"):\r\n RNA += \"A\"\r\n dados[2] += 1\r\n elif(teste[i] == \"A\"):\r\n RNA += \"U\"\r\n dados[3] += 1\r\n i += 1\r\n return RNA\r\n\r\nfor j in range(len(relist)):\r\n Lista.append(transcreve(j))\r\n\r\nprint(Lista)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"#8_bioinfo.py","file_name":"#8_bioinfo.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"296713189","text":"'''\nTests on SKIN dataset\n'''\n\nfrom sklearn.cluster import KMeans\nfrom scipy.spatial import distance\nimport numpy as np\nimport pandas as pd\nimport random\nimport scipy.io as scio\nfrom numpy import genfromtxt\nfrom KmeansPPcenters import KMeanPlusPlus\nfrom KMeansOut import kmeansOutliers, cost, compute_phi_star\nfrom sklearn.metrics import average_precision_score, precision_recall_curve\nfrom LO import add_noise, KPP_centers, random_centers, LO_cost, KMO_centers, LO_cost3, LO_cost2\nfrom LO_with_itr import LloydOut\n\n\n\nread_data = genfromtxt('realDataProcessed/skin.csv', delimiter=',')\nprint(\"data loaded\")\nskin_labels= read_data[:,3]\nskin_data=read_data[:,0:3]\n\nnum_clusters=[20]\nzs =[100]\nmin_value=0\nmax_value=100000\n#min_values=[-16, 0, 7.693475e-08]\n#max_values= [15, 20, 33]\ntol= .05\n#itr=100\n\ndata= skin_data\n#data_with_outliers=skin_data\niterations=10\nruns=5\nfor num_cluster in num_clusters:\n for z in zs:\n print(\"num_cluster:{}, z:{}\".format(num_cluster, z))\n KMO_LO_prec = []\n KPP_LO_prec = []\n R_LO_prec = []\n KPP_LO_cost=[]\n KMO_LO_cost=[]\n R_LO_cost=[]\n KPP_LO_itr=[]\n KMO_LO_itr=[]\n R_LO_itr=[]\n for i in range(iterations):\n KMO_LO_prec_runs = []\n KPP_LO_prec_runs = []\n R_LO_prec_runs = []\n KPP_LO_cost_runs=[]\n KMO_LO_cost_runs=[]\n R_LO_cost_runs=[]\n KPP_LO_itr_runs=[]\n KMO_LO_itr_runs=[]\n R_LO_itr_runs=[]\n data_with_outliers, z_indx, data_inliers = add_noise(data, z, min_value, max_value)\n for j in range(runs):\n kpp_centers = KPP_centers(data_with_outliers, num_cluster)\n KPP_cost, indx_list= LO_cost3(data_with_outliers, kpp_centers, z)\n KPP_precision = len(np.intersect1d(z_indx, indx_list))/len(z_indx)\n #print(\"KPP\")\n #centers, cid, indx_list, KPP_precision, recall, data_out, KPP_itr =LloydOut(data_with_outliers, kpp_centers, num_cluster, z, tol, itr, z_indx )\n KPP_LO_prec_runs.append(KPP_precision)\n KPP_LO_cost_runs.append(KPP_cost)\n #KPP_LO_itr_runs.append(KPP_itr)\n\n #rand_centers= random_centers(data_with_outliers, num_cluster)\n #centers, cid, indx_list, R_precision, recall, data_out= LloydOut(data_with_outliers, rand_centers, num_cluster, z, tol, itr, z_indx)\n #R_cost= LO_cost2(data_with_outliers, centers, z)\n #R_LO_prec.append(R_precision)\n #3R_LO_cost.append(R_cost)\n #KPP_LO_cost.append(KPP_cost)\n #print(\"KMO\")\n phi_star= compute_phi_star(data, num_cluster, kpp_centers, z)\n kmo_centers= KMO_centers(data_with_outliers, num_cluster, phi_star, z)\n #KMO_cost, indx_list= LO_cost2(data_with_outliers, kmo_centers, z)\n #centers, cid, indx_list, KMO_precision, recall, data_out, KMO_itr= LloydOut(data_with_outliers, kmo_centers, num_cluster, z, tol, itr, z_indx)\n KMO_cost, indx_list= LO_cost3(data_with_outliers, kmo_centers, z)\n KMO_precision = len(np.intersect1d(z_indx, indx_list))/len(z_indx)\n KMO_LO_prec_runs.append(KMO_precision)\n KMO_LO_cost_runs.append(KMO_cost)\n #KMO_LO_itr_runs.append(KMO_itr)\n print(\"runs:{},KPP:{}, cost:{}, itr: {}\".format(j, np.mean(np.array(KPP_LO_prec_runs)), np.mean(np.array(KPP_LO_cost_runs)), np.mean(np.array(KPP_LO_itr_runs))))\n #print(\"Random:{}, cost:{}, itr:{}\".format(np.mean(np.array(R_LO_prec)),np.mean(np.array(R_LO_cost))))\n print(\"runs:{}, KMO:{},cost:{}, itr:{}\".format(j, np.mean(np.array(KMO_LO_prec_runs)), np.mean(np.array(KMO_LO_cost_runs)), np.mean(np.array(KMO_LO_itr_runs))))\n KPP_LO_prec.append(np.mean(np.array(KPP_LO_prec_runs)))\n KPP_LO_cost.append(np.mean(np.array(KPP_LO_cost_runs)))\n 
#KPP_LO_itr.append(np.mean(np.array(KPP_LO_itr_runs)))\n \n KMO_LO_prec.append(np.mean(np.array(KMO_LO_prec_runs)))\n KMO_LO_cost.append(np.mean(np.array(KMO_LO_cost_runs)))\n #KMO_LO_itr.append(np.mean(np.array(KMO_LO_itr_runs)))\n print(\"KPP:{}, cost:{}, itr: {}\".format( np.mean(np.array(KPP_LO_prec)), np.mean(np.array(KPP_LO_cost)), np.mean(np.array(KPP_LO_itr))))\n #print(\"Random:{}, cost:{}, itr:{}\".format(np.mean(np.array(R_LO_prec)),np.mean(np.array(R_LO_cost))))\n print(\" KMO:{},cost:{}, itr:{}\".format( np.mean(np.array(KMO_LO_prec)), np.mean(np.array(KMO_LO_cost)), np.mean(np.array(KMO_LO_itr))))\n\n\n","sub_path":"lib/LO_SKIN_wo_Lloyds.py","file_name":"LO_SKIN_wo_Lloyds.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"5194275","text":"#coding=utf-8\r\n\r\nfrom django.contrib.auth.models import User\r\nfrom django.contrib.auth import authenticate\r\nfrom django.contrib.sessions.backends.cached_db import SessionStore\r\n\r\nfrom api.error import Error\r\nfrom api.error import ERR_USERNAME\r\nfrom api.error import ERR_USER_DB\r\nfrom api.error import ERR_AUTH_LOGIN\r\nfrom api.error import ERR_AUTH_LOGOUT\r\n\r\nclass API(object):\r\n SESSION_KEY = '_auth_user'\r\n IP_SESSION_KEY = '_auth_ip'\r\n \r\n def get_db_user_by_username(self, username):\r\n user = User.objects.filter(username = username)\r\n if not user.exists():\r\n raise Error(ERR_USERNAME)\r\n return user[0]\r\n\r\n def is_superuser(self, user):\r\n if type(user) != User:\r\n raise Error(ERR_USER_DB)\r\n return user.is_superuser\r\n\r\n def get_session_by_id(self, session_id=None):\r\n session = SessionStore(session_id)\r\n return session\r\n\r\n def login(self, username, password, ip, expiry=86400):\r\n session = self.get_session_by_id()\r\n user = authenticate(username = username, password = password)\r\n if user and user.api_user:\r\n session[self.SESSION_KEY] = user.username\r\n session[self.IP_SESSION_KEY] = ip\r\n session.set_expiry(int(expiry))\r\n session.save()\r\n return session._session_key\r\n return False\r\n\r\n def logout(self, session_id):\r\n try:\r\n session = self.get_session_by_id(session_id)\r\n session.delete()\r\n except Exception as e:\r\n return False\r\n return True\r\n\r\n","sub_path":"vmuser/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"452880009","text":"import glob\nimport sys\nimport time\nfrom pymi.config import config\nfrom pymi.drivers.mysql import connection_mysql\n\n\nclass Migrator:\n microtime = None\n start = 0\n start_base = 0\n\n def __init__(self):\n self.microtime = lambda: int(round(time.time() * 1000))\n self.start_base = self.microtime()\n\n def migrations_run(self, connection):\n exists = self.check_migrations_table(connection)\n\n if not exists:\n self.make_migrations_table(connection)\n\n with open('pymi/resources/run.sql') as f:\n sql = f.read()\n\n with connection.cursor() as cursor:\n cursor.execute(sql)\n\n result = cursor.fetchall()\n\n return result\n\n def connection_get(self, driver):\n if driver == 'mysql':\n return connection_mysql\n\n print('Sorry, the driver `' + driver + '` does not exist.')\n\n sys.exit()\n\n def check_migrations_table(self, connection):\n with open('pymi/resources/check.sql') as f:\n sql_check = f.read()\n\n with connection.cursor() as cursor:\n result = cursor.execute(sql_check)\n\n return result == 1\n\n def driver_current(self):\n return config['database']['driver']\n\n def make_migrations_table(self, connection):\n with open('pymi/resources/migrations.sql') as f:\n sql = f.read()\n\n with connection.cursor() as cursor:\n cursor.execute(sql)\n\n # Commit the changes.\n connection.commit()\n\n def retrieve(self, action):\n path = config['migrations_folder'] + '/*_' + action + '_*.sql'\n\n migrations_full = glob.glob(path)\n\n # Sort the migrations ascending, basically.\n migrations_full.sort()\n\n migrations = []\n list = []\n\n for migration in migrations_full:\n replaced = migration.replace(config['migrations_folder'] + '/', '')\n replaced = replaced.replace('.sql', '')\n\n migrations.append(replaced)\n list.append((migration, replaced))\n\n return {\n 'full': migrations_full,\n 'list': list,\n 'migrations': migrations\n }\n","sub_path":"pymi/migrator.py","file_name":"migrator.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"622487136","text":"import json, sys\nimport sqlite3\nfrom collections import OrderedDict\n\n\ntext = json.loads(open('./shijing.json', 'r').read())\ncon = sqlite3.connect(\"./shijing.db\")\n\nc = con.cursor()\nc.execute('''CREATE TABLE \"shijing\" (\"value\" INTEGER NOT NULL PRIMARY KEY,\n \"title\" VARCHAR(255) NOT NULL,\n \"chapter\" VARCHAR(255) NOT NULL,\n \"section\" VARCHAR(255) NOT NULL,\n \"content\" TEXT)''')\n\nfor i in text:\n s=\"\"\n for j in i[\"content\"]:\n s+=j+\"\\n\"\n c.execute(\"INSERT INTO shijing (title, chapter, section, content) VALUES (?,?,?,?)\",(i[\"title\"],i[\"chapter\"],i[\"section\"],s))\n \ncon.commit()\ncon.close()\n","sub_path":"shijing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"203878283","text":"import io\nimport re\nimport unicodedata\nimport numpy as np\nimport tensorflow as tf\n\ndef unicode_to_ascii(s):\n\treturn ''.join(c for c in unicodedata.normalize('NFD', s)\n\t\tif unicodedata.category(c) != 'Mn')\n\ndef preprocess_eng(w):\n\tw = unicode_to_ascii(w.lower().strip())\n\n\t# replace substring\n\tw = re.sub(r\"([?.!,¿])\", r\" \\1 \", w)\n\tw = re.sub(r'[\" \"]+', \" \", w)\n\tw = re.sub(r\"[^a-zA-Z?.!,¿]+\", \" \", w)\n\tw = w.rstrip().strip()\n\n\t# add start/end tags\n\tw = ' ' + w + ' '\n\treturn w\n\ndef preprocess_kor(w):\n\t# replace substring\n\tw = re.sub(r'([?.!,¿])', r' \\1 ', w)\n\tw = re.sub(r'[\" \"]+', ' ', w)\n\tw = re.sub(r'[^ ㄱ-ㅣ가-힣]+', ' ', w)\n\tw = w.rstrip().strip()\n\n\t# add start/end tags\n\tw = ' ' + w + ' '\n\treturn w\n\ndef read_data(filename):\n\tlines = io.open(filename, encoding='UTF-8').read().strip().split('\\n')\n\tword_pairs = np.array([l.rstrip().split('\\t') for l in lines])\n\teng = list(map(preprocess_eng, word_pairs[:, 0]))\n\tkor = list(map(preprocess_kor, word_pairs[:, 1]))\n\treturn eng, kor\n\ndef tokenize(sent, min_count=5):\n\tsent_tokenizer = tf.keras.preprocessing.text.Tokenizer(oov_token='', filters='')\n\tsent_tokenizer.fit_on_texts(sent)\n\twc_list = [sent_tokenizer.word_counts[t] for t in dict(sent_tokenizer.word_counts)]\n\n\tinfreq_words = [k for k, c in sent_tokenizer.word_counts.items() if c < min_count]\n\tfor w in infreq_words:\n\t\tdel sent_tokenizer.word_index[w]\n\t\tdel sent_tokenizer.word_docs[w]\n\t\tdel sent_tokenizer.word_counts[w]\n\n\tsequences = sent_tokenizer.texts_to_sequences(sent)\n\tsequences = tf.keras.preprocessing.sequence.pad_sequences(sequences, padding='post')\n\treturn sequences, sent_tokenizer, wc_list\n","sub_path":"code/nmt/source/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"559298054","text":"#! /usr/bin/env python\n# -*- coding: ascii -*-\n\nfrom __future__ import division\n\nimport sys\nimport os\n\nimport shutil\nimport subprocess\n\n\nGitRepos = (\n\t'https://github.com/bugnano/frigame.git',\n\t'https://github.com/bugnano/frigame_sorted.git',\n)\n\n\ndef main():\n\troot = os.getcwd()\n\tif not os.path.isdir('deps'):\n\t\tos.mkdir('deps')\n\n\tos.chdir('deps')\n\tfor repo in GitRepos:\n\t\tsubprocess.call(['git', 'clone', repo])\n\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"install_deps.py","file_name":"install_deps.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"161119143","text":"def num(inF):\n D = {}\n inFile = open(inF)\n ouFile = open('ERR0498-04-05.unmapped.unique.human-viruse-checked', 'w')\n ouFile2 = open('ERR0498-04-05.unmapped.unique.human-viruse-checked-not', 'w')\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n\n ch = fields[3]\n query_start1 = int(fields[8])\n query_end1 = int(fields[9])\n query_start2 = int(fields[20])\n query_end2 = int(fields[21])\n\n subject_start1 = int(fields[10])\n subject_end1 = int(fields[11])\n subject_start2 = int(fields[22])\n subject_end2 = int(fields[23])\n\n '''\n if (query_start1 + query_end1) <= (query_start2 + query_end2):\n if 1 <= query_start1 <= 10 and 34 <= query_end1 <= 46 \\\n and 35 <= query_start2 <= 46 and 65 <= query_end2 <= 76:\n ouFile.write(line + '\\n')\n else:\n ouFile2.write(line + '\\n')\n else:\n if 35 <= query_start1 <= 46 and 65 <= query_end1 <= 76 \\\n and 1 <= query_start2 <= 10 and 34 <= query_end2 <= 46:\n ouFile.write(line + '\\n')\n else:\n ouFile2.write(line + '\\n')\n '''\n if (query_start1 + query_end1) <= (query_start2 + query_end2):\n if 1 <= query_start1 <= 2 and 30 <= query_end1 <= 45 \\\n and 30 <= query_start2 <= 45 and 75 <= query_end2 <= 76:\n ouFile.write(line + '\\n')\n else:\n ouFile2.write(line + '\\n')\n else:\n if 30 <= query_start1 <= 45 and 75 <= query_end1 <= 76 \\\n and 1 <= query_start2 <= 2 and 30 <= query_end2 <= 45:\n ouFile.write(line + '\\n')\n else:\n ouFile2.write(line + '\\n')\n\n ouFile.close()\n ouFile2.close()\n\nnum('ERR0498-04-05.unmapped.unique.human-viruse-check')\n","sub_path":"RNAseqMSMS/23-rna-seq-virus-stats/HeLa-human-virus-final/1-1-filter.py","file_name":"1-1-filter.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"77598320","text":"import queue\nimport random\nimport numpy as np\n\nk=input('Enter the buffer size:')\nk=int(k)+1\nprob_1=input('Enter the probability to choose:')\nprob_1=float(prob_1)\nans1_1=[]\nans1_2=[]\nans1_3=[]\nans2_1=[]\nans2_2=[]\nans2_3=[]\nans3_1=[]\nans3_2=[]\nans3_3=[]\nans4_1=[]\nans4_2=[]\nans4_3=[]\n\nfor i in range(5):\n q=queue.Queue(k)\n q2=queue.Queue(k)\n \n \n count=0\n count1=0\n q1_processed=0\n count2_1=0\n q2_processed=0\n t_record1=0\n t_record2=0\n t_record2_2=0\n rate_l=8\n rate_m=5\n c=0\n pkt_delay1=[]\n pkt_delay2=[]\n flag1=False\n flag2=False\n timecount1=0\n timecount2=0\n \n \n while(count<5000):\n count+=1\n t=random.expovariate(rate_l)\n t_record1+=t\n while(t_record2<=t_record1 and (not q.empty())):\n if(not flag1):\n t_out=random.expovariate(rate_m)\n t_record2+=t_out\n if(t_record21000):\n c+=1\n pkt_delay1=pkt_delay1+[t_record1]\n else:\n count2_1+=1\n if(not q2.full()):\n q2.put(count)\n if(count>1000):\n c+=1\n pkt_delay2=pkt_delay2+[t_record1]\n ans1_1=ans1_1+[1-(q1_processed+q2_processed)/(count-q.qsize()-q2.qsize())]\n ans1_2=ans1_2+[1-(q1_processed/(count1-q.qsize()))]\n ans1_3=ans1_3+[1-(q2_processed/(count2_1-q2.qsize()))]\n ans2_1=ans2_1+[sum(pkt_delay1[int(1000*prob_1):-k])/(len(pkt_delay1)-int(1000*prob_1)-k)]\n ans2_2=ans2_2+[sum(pkt_delay2[1000-int(1000*prob_1):-k])/(len(pkt_delay2)-1000+int(1000*prob_1)-k)]\n ans2_3=ans2_3+[ans2_1[-1]*(len(pkt_delay1)-500-k)/(len(pkt_delay1)-500-k+len(pkt_delay2)-500-k)+ans2_2[-1]*(len(pkt_delay2)-500-k)/(len(pkt_delay1)-500-k+len(pkt_delay2)-500-k)]\n ans3_1=ans3_1+[(q1_processed-int(1000*prob_1)+q.qsize())/(t_record1-timecount1)]\n ans3_2=ans3_2+[(q2_processed-1000+int(1000*prob_1)+q2.qsize())/(t_record1-timecount2)]\n ans3_3=ans3_3+[ans3_1[-1]+ans3_2[-1]]\n ans4_1=ans4_1+[ans2_1[-1]*rate_l*prob_1*(1-ans1_2[-1])]\n ans4_2=ans4_2+[ans2_2[-1]*rate_l*(1-prob_1)*(1-ans1_3[-1])]\n \n \n \nprint(\"Total packets:\",count, \"packets going to q1:\",count1,\"q1 processed packets\",q1_processed,\"packets going to q2\",count2_1,\"q2 processed packets\",q2_processed,c)\nprint(\"queue 1 blocking prob:\",np.mean(ans1_2))\nprint(\"queue 2 blocking prob:\",np.mean(ans1_3))\nprint(\"system blocking prob:\", np.mean(ans1_1))\nprint(\"average delay for queue1:\",np.mean(ans2_1))\nprint(\"average delay for queue2:\",np.mean(ans2_2))\nprint(\"throughput in the system:\", np.mean(ans3_3))\nprint(\"throughput in queue 1:\",np.mean(ans3_2))\nprint(\"throughput in queue 2:\",np.mean(ans3_1))\nprint(\"average delay for system:\",np.mean(ans2_3))\nprint(\"average number of packets in queue1\",np.mean(ans4_1))\nprint(\"average number of packets in queue2\",np.mean(ans4_2))\nprint(\"average number of packets in system\",np.mean(ans4_1)+np.mean(ans4_2))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"634918316","text":"from random import randint\nfrom collections import deque\n\nN = randint(0, 100)\nhistory = deque([], 5)\n\n\ndef guess(k):\n if k == N:\n print('Right')\n return True\n if k < N:\n print('%s is less-than N' % k)\n else:\n print('%s is greater-than N' % k)\n return False\n\n\nwhile True:\n line = input('Please input a number:')\n if line.isdigit():\n k = int(line)\n history.append(k)\n if guess(k):\n break\n elif line == 'history' or line == 'h':\n print(list(history))\n\n\n\n\n\n# plckle 基本使用\n# from collections import deque\n# import pickle\n#\n# q = deque([2, 3, 4, 5, 6])\n# print(q)\n#\n# # 将数据存入文件\n# pickle.dump(q, open('history', 'wb'))\n# # 读取文件\n# q2 = pickle.load(open('history', 'rb'))\n# print(q2)\n","sub_path":"7_py实用编程技巧/1_数据结构与算法/1_7实现用户的历史记录功能.py","file_name":"1_7实现用户的历史记录功能.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"420197167","text":"\n__author__ = 'Lokesh Sanapalli'\n__version__ = '1.0'\n''' This code recurisvely reads the files inside directories\nand displays output'''\n\nimport os\ndef read_files(root):\n if os.path.isdir(root)==True:\n dirList=os.listdir(root)\n for x in dirList:\n read_files(os.path.join(root,x))\n else:\n with open(root) as f:\n out=f.read()\n print('contents in {0}'.format(out))\n\nif __name__ == '__main__':\n read_files('/home/lokesh1729/myPython/Root')\n","sub_path":"rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"303787679","text":"from django.http import HttpResponse\nfrom django.shortcuts import render_to_response, redirect, render\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom apps.home import forms\n\nfrom Core.BLL import *\n\n\n# Create your views here.\n@login_required(login_url='/login/')\ndef index(request):\n if request.user.is_authenticated():\n user = request.user\n name = user.first_name + \" \" + user.last_name\n context = {\n 'name': name\n }\n return render(request, 'home/index.html', context)\n return render(request, 'home/index.html', \"\")\n\n\ndef login(request):\n return render(request, 'home/login.html', \"\")\n\n\n@login_required(login_url='/login/')\ndef logout(request):\n auth_logout(request)\n return redirect('/')\n\n\n@login_required(login_url='/login/')\ndef question(request):\n return render(request, 'home/question.html', \"\")\n\n\n@login_required(login_url='/login/')\ndef detail(request, pid):\n topics = [\n 'Python',\n 'Django',\n 'Open source',\n 'Pycharm'\n ]\n postOrQuestion = PostBLL.SelectById(pid)\n context = {\n 'topics': topics,\n 'title': postOrQuestion.title,\n 'content': postOrQuestion.content,\n 'type_id': postOrQuestion.type_id\n }\n return render(request, 'home/detail.html', context)\n\n\n@login_required(login_url='/login/')\ndef question_insert(request):\n title = str(request.POST['question_title'])\n isAnonymous = False\n if request.POST.get('is_anonymous', True) == \"on\":\n isAnonymous = True\n isExpand = request.POST['is_expand']\n content = request.POST['question_detail']\n userId = request.user.id\n if (isExpand == \"1\"):\n PostBLL.Insert(1, [], title, content, userId, isAnonymous)\n else:\n PostBLL.Insert(1, [], title, \"\", userId, isAnonymous)\n\n # NotificationBLL.create_notification(userId)\n\n return redirect('/detail/' + str(PostBLL.SelectLastQuestion(userId).id))\n\n\ndef schedule(request):\n return render(request, \"home/schedule.html\", \"\")\n\n\n@login_required(login_url='/login/')\ndef post(request):\n return render(request, \"home/post.html\", {'form': forms.PostForm})\n\n\n@login_required(login_url='/login/')\ndef post_insert(request):\n title = request.POST['title']\n content = request.POST['content']\n userId = request.user.id\n PostBLL.Insert(2, [], title, content, userId, 0)\n\n # NotificationBLL.create_notification(userId)\n\n return redirect('/detail/' + str(PostBLL.SelectLastPost(userId).id))\n\n\ndef ajax_load_more(request):\n if request.is_ajax():\n total = 1\n offset = int(request.GET.get('offset', '0'))\n end = offset + total\n template = 'home/load_more_items.html'\n if (offset < len(PostBLL.SelectAll())):\n feeds = PostBLL.SelectWithoutDownvotePost(request.user.id)[offset:end]\n data = {\n 'logged_user_id': request.user.id,\n 'posts': feeds,\n }\n return render_to_response(template, data)\n else:\n return render_to_response(template, None)\n\n\ndef ajax_delete_post(request):\n if request.is_ajax():\n PostBLL.Delete(request.GET.get('id', '0'))\n return HttpResponse()\n\n\ndef ajax_downvote_post(request):\n if request.is_ajax():\n PostBLL.IncreaseDownVote(request.GET.get('id', '0'), request.user.id)\n return HttpResponse()\n\n\ndef ajax_undo_downvote_post(request):\n if request.is_ajax():\n PostBLL.DecreaseDownvote(request.GET.get('id', '0'), request.user.id)\n return 
HttpResponse()\n","sub_path":"apps/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"162331175","text":"# coding: UTF-8\n\n\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def detectCycle(self, head):\n fast = slow = head\n if head is None or head.next is None:\n return None\n\n while fast is not None and fast.next is not None:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n break ;\n\n if fast is None or fast.next is None:\n return None\n\n fast = head\n while fast != slow:\n fast = fast.next\n slow = slow.next\n\n return slow\n\n\n\n","sub_path":"leetcode/linkedList/LinkedListCycleII.py","file_name":"LinkedListCycleII.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"120037357","text":"import json\nimport re\nimport time\nfrom datetime import datetime\nfrom random import random\nimport requests\n\n\nclass Pin(object):\n def __init__(self, pidId, personaId, dob, sid, pinUrl):\n self.pinUrl = pinUrl\n self.pidId = pidId\n self.personaId = personaId\n self.dob = dob\n self.sid = sid\n self.s = 1\n\n self.r = requests.Session()\n self.r.headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',\n 'Connection': 'keep-alive',\n 'Host': 'accounts.ea.com',\n 'Origin': 'https://www.easports.com',\n 'Referer': 'https://www.easports.com/fifa/ultimate-team/web-app/',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'cross-site',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36'\n }\n self.r.headers['x-ea-game-id'] = 'FUT20WEB'\n self.r.headers['x-ea-game-id-type'] = 'easku'\n self.r.headers['x-ea-taxv'] = '1.1'\n\n def ts(self):\n ts = datetime.utcnow()\n ts = ts.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n return ts\n\n def event(self, en, status=False, pgid=False, source=False, end_reason=False):\n data = {\n 'core': {\n 's': 1,\n 'pidt': 'persona',\n 'pid': self.personaId,\n 'ts_event': self.ts(),\n 'en': en,\n 'pidm': {\n 'nucleas': self.pidId\n },\n }\n }\n data['core']['dob'] = self.dob\n\n if pgid:\n data['pgid'] = pgid\n if status:\n data['status'] = status\n if source:\n data['source'] = source\n if end_reason:\n data['end_reason'] = end_reason\n\n if en == 'login':\n data['type'] = 'utas'\n data['userid'] = self.personaId\n elif en == 'page_view':\n data['type'] = 'menu'\n elif en == 'error':\n data['server_type'] = 'utas'\n data['errid'] = 'server_error'\n data['type'] = 'disconnect'\n data['sid'] = self.sid\n\n self.s += 1\n\n return data\n\n def send(self, events):\n time.sleep(0.5 + random() / 50)\n data = {\n 'custom': {\n 'networkAccess': 'G', 'service_plat': 'ps4'\n },\n 'et': 'client',\n 'events': events,\n 'gid': 0,\n 'is_sess': True,\n 'loc': 'en_US',\n 'plat': 'web',\n 'rel': 'prod',\n 'taxv': '1.1',\n 'tid': 'FUT20WEB',\n 'tidt': 'easku',\n 'ts_post': self.ts(),\n 'v': '20.4.2',\n 'sid': self.sid\n }\n self.r.options(self.pinUrl)\n rc = self.r.post(self.pinUrl, data=json.dumps(data)).json()\n if rc['status'] != 'ok':\n print('pin event not okay')\n \n return True","sub_path":"pin.py","file_name":"pin.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"492619449","text":"# before starting make sure mongod has been started in order to start the service\nfrom pymongo import MongoClient\n\n# client = MongoClient('localhost', 27017)\nclient = MongoClient('mongodb://localhost:27017/')\n\nTEST_DB_NAME = 'my_first_db'\n\n\ndef test_current_state(client):\n print(client)\n print(client.database_names()) # <-- current databases. show dbs command. if no dbs yet show default one\n\n\ndef test_simple_insert(client):\n try:\n client.drop_database(TEST_DB_NAME) # just in case it exists\n db = client[TEST_DB_NAME] # <-- use 'database' command\n # db = client.my_first_db # this is also possible\n print(db)\n\n print(client.database_names()) # <-- default dbs. show dbs command. Still not created till we insert something\n print(db.collection_names(include_system_collections=False)) # collections in 'new_db'\n\n # now I create a collection inside of 'new_db'\n collection_test = db.collection_test\n # collection_test = db['collection_test']\n collection_test.insert({'a': 1})\n\n print(collection_test)\n print(client.database_names())\n print(db.collection_names(include_system_collections=False))\n\n # finally we delete everything\n collection_test.drop()\n print(db.collection_names(include_system_collections=False))\n\n except Exception as e:\n print(\"Error: \" + str(e))\n\n finally:\n print('##### Empty Result #######')\n client.drop_database(TEST_DB_NAME)\n print(client.database_names())\n client.close()\n\n\ndef main():\n test_current_state(client)\n test_simple_insert(client)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"551475524","text":"#!/usr/bin/env python\n\nimport xml.etree.ElementTree as ET\nimport os\nimport glob\nimport time\nimport subprocess\nimport re\nimport sys\nsys.dont_write_bytecode = True # prevent creation of .pyc files\n\n\"\"\"\nThis file takes an execution results directory as an argument, and generates a\nlist of all output xml results files. For each output xml result file, it runs\ncpachecker as a witness checker on each benchmark for which an error was found\n(i.e., had a 'false' result).\n\nThe original output xml file is modified in memory to include results statuses\nfrom the witness checker for each benchmark 'false' benchmark, as well as\nenabling the witness related output file download links.\n\nThe modified in-memory output xml is then written as a new output xml file, with\nthe string \".witchecked\" injected into the original output xml file name. E.g.,\nthis file will write a file called \"a.witchecked.Simple.xml\" if it encounters an\noriginal output xml file called \"a.Simple.xml\".\n\"\"\"\n\nif not 2 == len(sys.argv) or not os.path.isdir(sys.argv[1]):\n print\n print(\"Usage:\\t\" + sys.argv[0] + \" EXECRESULTDIR\")\n print\n print(\"EXECRESULTDIR\\tA path to a directory containing a SMACKBench\")\n print(\"\\t\\texecution result set, on which to check witnesses\")\n exit()\n\nexecDir = os.getcwd()\ntargetDir = sys.argv[1]\n\ntargetDir = os.path.join(targetDir, 'results')\noutXmls = glob.glob(targetDir + '/*results*.xml')\n\nfor outXml in outXmls:\n baseXml,setName = os.path.splitext(os.path.splitext(outXml)[0])\n outXmlNew = baseXml + '.witchecked' + setName + '.xml'\n tree = ET.parse(outXml)\n\n root = tree.getroot()\n runName = root.get('name')\n benchmarkName = root.get('benchmarkname')\n runDate = root.get('date')\n runDate = time.strptime(runDate, \"%Y-%m-%d %H:%M:%S %Z\")\n runDate = \"{0:04d}-{1:02d}-{2:02d}_{3:02d}{4:02d}\".format(runDate.tm_year,\n runDate.tm_mon,\n runDate.tm_mday,\n runDate.tm_hour,\n runDate.tm_min)\n runTimelimit = root.get('timelimit')\n witTimelimit = int(runTimelimit.split()[0])/10\n \n pathPrefix = benchmarkName + \".\" + runDate\n logFolder = os.path.join(targetDir, pathPrefix + \".logfiles\")\n fileRoot = os.path.join(logFolder, runName)\n for run in root.findall('run'):\n sourcefile = run.get('name')\n # Get property file from input benchmark folder\n propfile = os.path.join(os.path.join('data', os.path.split(sourcefile)[0]), 'ALL.prp')\n # Now set sourcefile to where it WOULD be in output folders, \n # given the folder structure of the actual input folder\n tokenizedInputFile = os.path.join('data', sourcefile)\n print(tokenizedInputFile)\n sourcefile = os.path.join(fileRoot,sourcefile)\n basefile = os.path.splitext(sourcefile)[0]\n\n witnessfile = sourcefile + '.witness.graphml'\n witnesscheckOutput = sourcefile + '.witnessCheckOutput'\n categoryCol = run.find('./column[@title=\"category\"]')\n statusCol = run.find('./column[@title=\"status\"]')\n outputfilesCol = run.find('./column[@title=\"Output Files\"]')\n # Make sure columns existed (they don't when runSet was terminated early)\n if categoryCol is not None and statusCol is not None:\n category = categoryCol.get('value')\n status = statusCol.get('value')\n # We only need to witness check if we got the answer right\n # and the verification result was false\n if 'correct' in category and 'false' in status:\n # Use runexec to enforce time limit\n # cpachecker complains if working directory isn't the cpachecker\n # directory, so we have to adjust paths to match 
this requirement\n cmd = ['../benchexec/bin/runexec']\n cmd += ['--output', '../' + witnesscheckOutput]\n cmd += ['--timelimit', str(witTimelimit)]\n cmd += ['--']\n # Below this point are the witness checking commands\n cmd += ['./scripts/cpa.sh']\n cmd += ['-noout']\n cmd += ['-heap', '16000M']\n cmd += ['-spec', '../' + witnessfile]\n cmd += ['-spec', '../' + propfile]\n cmd += ['../' + tokenizedInputFile]\n os.chdir('cpachecker')\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n cmdOut = p.communicate()[0]\n checktime = float(re.search('cputime=(.*)s', cmdOut.decode('utf-8')).group(1))\n \n os.chdir(execDir)\n witSuccess = False\n witTimeout = False\n with open(witnesscheckOutput, 'r') as witout:\n output = witout.read()\n witSuccess = re.search('Verification result:\\s*FALSE', output)\n witTimeout = re.search('EDIT THIS WHEN YOU KNOW TIMEOUT STRING', output)\n if witSuccess:\n statusCol.set('value','witness confirmed')\n else:\n statusCol.set('value', 'something went wrong')\n if outputfilesCol is not None:\n newVal = outputfilesCol.get('value').replace(' hidden','')\n outputfilesCol.set('value', newVal)\n ET.SubElement(run, 'column', {'title':'time(s)\\nfor\\nwitness',\n 'value':\"{0:.3f}s\".format(checktime)})\n tree.write(outXmlNew)\n","sub_path":"svcomp/bench/src/checkWitnesses.py","file_name":"checkWitnesses.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"175947281","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/3/3 12:23\n# @Author : demi\n# @Email : demilxj@foxmail.com\n# @File : read_config.py\n\nimport configparser\nfrom tools.project_path import *\n\n\nclass ReadConfig:\n\n @staticmethod\n def get_config(file_path, section, option):\n cf = configparser.ConfigParser()\n cf.read(file_path)\n return cf[section][option]\n\n\nif __name__ == '__main__':\n print(ReadConfig.get_config(test_config_path, 'MODE', 'mode'))","sub_path":"tools/read_config.py","file_name":"read_config.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"70726636","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nclass StatisticalAnalysis:\n\n def __init__(self, targetdir):\n self.targetdir = targetdir\n \n def analyse(self):\n df_list = [pd.read_table(self.targetdir+file, delim_whitespace=True) \\\n for file in sorted(os.listdir(self.targetdir))]\n df = pd.concat(df_list, axis=1)\n\n for i, column in enumerate(df):\n if i == 9:\n print(column)\n print(\"bootstrap: \", self.bootstrap(df[column].values, 100))\n print(\"blocking: \", self.blocking(df[column].values, 100), \"\\n\")\n \n def bootstrap(self, vector, nBoots):\n N = len(vector)\n avgVec = np.zeros(nBoots)\n for i in range(0, nBoots):\n avgVec[i] = np.average(np.random.choice(vector, N))\n return np.average(avgVec), np.var(avgVec)\n \n def blocking(self, vector, minBlockSize):\n N = len(vector)\n avgVec = np.zeros(minBlockSize)\n varVec = np.zeros(minBlockSize)\n blocksVec = np.zeros(minBlockSize)\n for i in range(1, minBlockSize+1):\n nBlocks = N//i\n means = np.zeros(nBlocks)\n for j in range(nBlocks):\n means[j] = np.mean(vector[j*i:(j+1)*i])\n avgVec[i-1] = np.mean(means)\n varVec[i-1] = np.var(means)\n blocksVec[i-1] = nBlocks\n plt.plot(blocksVec, varVec)\n plt.xlabel(\"Blocksize\")\n plt.ylabel(\"Variance\")\n plt.title(\"Variance as function of block size for N_p=100, N_d=3 and alpha=0.47\")\n plt.savefig(\"../data/plots/blocking.png\")\n return avgVec[-1], varVec[-1] \n\nif __name__ == \"__main__\":\n S = StatisticalAnalysis('../data/energies/')\n S.analyse()\n","sub_path":"Project1/python/block_bootstrap.py","file_name":"block_bootstrap.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"100800897","text":"from pathlib import Path\nfrom app.core.config import LOGGING_LEVEL\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\nLOGGER = {\n \"path\": str(Path(BASE_DIR / 'log/access.log')),\n \"level\": LOGGING_LEVEL,\n \"rotation\": \"20 days\",\n \"retention\": \"1 months\",\n \"format\": \"{level: <8} {time:YYYY-MM-DD HH:mm:ss.SSS} - {name}:{function}:{line} - {message}\"\n}\n\n","sub_path":"app/config/logging_config.py","file_name":"logging_config.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"576544666","text":"# Collin Gros\n#\n# to sort the files in a folder on my desktop\n#\n# TODO:\n# complete!\n# maybe add usr input for in/out dirs?\n\n# also, i know using lists for file extensions is probably\n# retarded. some guy out there has definitely made a huge list\n# of file extensions. plus, even using the file extension\n# method to differentiate between files is retarded. i just\n# decided to use it because most of the files i had to sort were\n# on windows, which requires file extensions. doing so was particularly\n# easier for me than learning how to parse output from\n# file {filename} | grep blah blah blah don't know\n\n# WARNING: will overwrite files with same name\n#\nimport os\n\n\ndef sort_files(file_list):\n docs = [\"txt\", \"pdf\", \"iso\", \"zip\",\n \"7z\", \"docx\", \"odt\", \"xlsx\",\n \"deb\", \"md\", \"pptx\"]\n code = [\"c\", \"css\", \"js\", \"html\",\n \"h\", \"sh\", \"cmake\", \"xml\",\n \"pickle\", \"py\", \"yml\", \"url\",\n \"cpp\", \"java\", \"class\"]\n music = [\"sunvox\", \"sunsynth\", \"xi\",\n \"mp3\", \"flac\", \"wav\", \"xm\",\n \"reason\", \"repatch\", \"ufs\",\n \"ogg\", \"sfk\", \"mid\", \"m4r\"]\n media = [\"mp4\", \"avi\", \"m4a\", \"jpg\",\n \"png\", \"m4\", \"bmp\", \"webm\",\n \"gif\", \"jpeg\", \"mov\", \"wma\",\n \"3gp\", \"thm\", \"mts\", \"aae\",\n \"m2ts\"]\n\n for file_path in file_list:\n #print(\"FILEPATH:\\t{0}\".format(file_path))\n in_docs = False\n in_code = False\n in_music = False\n in_media = False\n unknown = False\n\n file_substr = file_path.split(\"/\")\n file_title = file_substr[-1]\n file_substr_ext = file_path.split(\".\")\n ext = file_substr_ext[-1].lower()\n\n in_steam = \"SteamLibrary\" in file_path\n\n if ext in docs:\n in_docs = True\n elif ext in code:\n in_code = True\n elif ext in music:\n in_music = True\n elif ext in media:\n in_media = True\n\n if not (in_docs or in_code or in_music or in_media):\n unknown = True\n #print(\"unknown ext:\\t{0}\".format(ext))\n\n dir_name = \"\"\n if in_steam:\n dir_name = \"sorted/steam/\"\n elif in_docs:\n dir_name = \"sorted/docs/\"\n elif in_code:\n dir_name = \"sorted/code/\"\n elif in_music:\n dir_name = \"sorted/music/\"\n elif in_media:\n dir_name = \"sorted/pics/\"\n elif unknown:\n dir_name = \"sorted/unknown/\"\n\n\n new_path = \"/media/surv/2TB/\" + dir_name + file_title\n #print(\"moving from {0} to {1}\".format(file_path, new_path))\n os.rename(file_path, new_path)\n\n\ndef get_files(path):\n file_list = []\n for root, dirs, files in os.walk(path):\n for file in files:\n file_path = os.path.join(root, file)\n file_list.append(file_path)\n\n return file_list\n\n\nsort_path = \"/media/surv/2TB/\"\nfile_list = get_files(sort_path)\n\nsort_files(file_list)\n#for elm, value in enumerate(exts):\n# print(\"elm:\\t{0}\\tvalue:\\t{1}\".format(elm, value))\n","sub_path":"file_sort.py","file_name":"file_sort.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"85203609","text":"'''\nThe Hamming distance between two integers is the number of positions at which the corresponding \nbits are different.\n\nNow your job is to find the total Hamming distance between all pairs of the given numbers.\n\nExample:\nInput: 4, 14, 2\n\nOutput: 6\n\nExplanation: In binary representation, the 4 is 0100, 14 is 1110, and 2 is 0010 (just\nshowing the four bits relevant in this case). So the answer will be:\nHammingDistance(4, 14) + HammingDistance(4, 2) + HammingDistance(14, 2) = 2 + 2 + 2 = 6.\nNote:\nElements of the given array are in the range of 0 to 10^9\nLength of the array will not exceed 10^4.\n'''\n\ndef total_hamming_distance(nums):\n total = 0\n for i in range(32):\n set_bits = 0\n for num in nums:\n if num & (1 << i): set_bits += 1\n total += set_bits * (len(nums) - set_bits)\n return total\n\ndef total_hamming_distance_v2(nums):\n result = 0\n count = 0\n for i in range(len(nums) - 1):\n for j in range(i + 1, len(nums)):\n val1, val2 = nums[i], nums[j]\n for _ in range(32):\n count += 1\n result += (val1 & 1 != val2 & 1)\n val1 >>= 1\n val2 >>= 1\n return count \n\n\n","sub_path":"algorithms/bits/total_hamming_distance.py","file_name":"total_hamming_distance.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"628898070","text":"import boto3\nimport logging\nimport os\nimport json\n\ndef get_environment_variables():\n queue_url = os.environ[\"QUEUE_URL\"]\n depth = os.environ['DEPTH']\n bucket = os.environ['BUCKET']\n return queue_url, bucket, depth\n\ndef handler(event, context):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Start clients\n s3 = boto3.client('s3')\n sqs = boto3.client(\"sqs\")\n\n queue_url, bucket, depth = get_environment_variables()\n\n # Getting the content of csv inside s3 bucket\n try:\n query_table = event['Records'][0]['s3']['object']['key']\n\n except KeyError:\n logger.error('ERROIntegration error with S3. Try again later')\n return {\n 'statusCode': 500,\n 'body': json.dumps('Integration error with S3. Try again later')\n }\n \n csv_response = s3.get_object(Bucket=bucket, Key=query_table)\n csv_reader = csv_response['Body'].read().decode('utf-8').split('\\r\\n')\n\n # Retrieve the queue url, in order to use sqs api\n logger.info(\"Queue URL is %s\", queue_url)\n\n # Sending entries to sqs\n try:\n # Creating the list to send in batch format for sqs\n i = 0\n\n # We need to apply this because the method send_message_batch has a limit of 10 entries\n for i in range(int(len(csv_reader)//10)):\n sqs_entries = [{'Id': str(j), 'MessageBody': f'{{\"Link\": \"{link}\", \"Depth\": {depth}}}'}\n for j, link in enumerate(csv_reader[(i*10):(10*(i+1))])]\n resp = sqs.send_message_batch(QueueUrl=queue_url, Entries=sqs_entries)\n logger.info(\"Send result: %s\", resp)\n \n # In case of len(next_link) < 10\n if i == 0:\n i = -1\n\n sqs_entries = [{'Id': str(j), 'MessageBody': f'{{\"Link\": \"{link}\", \"Depth\": {depth}}}'}\n for j, link in enumerate(csv_reader[(10*(i+1)):])]\n resp = sqs.send_message_batch(QueueUrl=queue_url, Entries=sqs_entries)\n logger.info(\"Send result: %s\", resp)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Done!')\n }\n\n except Exception as e:\n logger.error(\"Got error: %s\", str(e))\n return {\n 'statusCode': 500,\n 'body': json.dumps(str(e))\n }\n","sub_path":"app/batch_event/lambda_batch_event.py","file_name":"lambda_batch_event.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"488082753","text":"\"\"\"Helper tools for jsonschema.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\n\nimport decorator\nimport jsonschema\n\n\n_SCHEMA_DIR = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..', '..', 'schema'))\n\n_TEST_MODE = False\n\n\ndef schema(*schemas, **kwschemas):\n \"\"\"Schema decorator.\"\"\"\n resolver = jsonschema.RefResolver('file://' + _SCHEMA_DIR + '/', None)\n validators = [\n jsonschema.Draft4Validator(s, resolver=resolver)\n for s in schemas\n ]\n\n kwvalidator = jsonschema.Draft4Validator({\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': kwschemas,\n }, resolver=resolver)\n\n def validate(args, kwargs):\n \"\"\"Validate function arguments.\"\"\"\n validated_args = []\n for validator, arg in zip(validators, args):\n validator.validate(arg)\n validated_args.append(arg)\n if kwargs:\n kwvalidator.validate(kwargs)\n\n return validated_args, kwargs\n\n @decorator.decorator\n def decorated(func, *args):\n \"\"\"Validates arguments given schemas.\"\"\"\n # decorator.decorator swallows kwargs for some reason.\n argspec = decorator.getargspec(func)\n defaults = []\n if argspec.defaults:\n defaults = argspec.defaults\n\n kwargs = {}\n for kw_name, kw_value, kw_default in zip(reversed(argspec.args),\n reversed(args),\n defaults):\n if kw_value != kw_default:\n kwargs[kw_name] = kw_value\n\n if len(defaults):\n args = list(args)[:-len(defaults)]\n else:\n args = list(args)\n valid_args, valid_kwargs = validate(args, kwargs)\n if not _TEST_MODE:\n return func(*valid_args, **valid_kwargs)\n\n return decorated\n","sub_path":"lib/python/treadmill/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"518960218","text":"from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom models import mini_XCEPTION, tiny_XCEPTION\nfrom data import load_emotion_data, split_data\nimport datetime\nimport os\n\n# parameters\nbatch_size = 32\nnum_epochs = 10000\ninput_shape = (64, 64, 1)\nvalidation_split = .2\nverbose = 1\nnum_classes = 7\npatience = 50\n\nnow = datetime.datetime.now()\nbase_path = 'models/'\nbase_path += now.strftime(\"%Y_%m_%d_%H_%M_%S\") + '/'\nif not os.path.exists(base_path):\n os.makedirs(base_path)\ndatasets_path = 'data/fer2013/fer2013.csv'\nmodel_name = \"mini_XCEPTION_{}x{}_\".format(input_shape[0], input_shape[1])\n\n# load fer2013 dataset\nlabels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']\nfaces, emotions = load_emotion_data(datasets_path, input_shape)\n\n# data generator\ndata_generator = ImageDataGenerator(featurewise_center=False,\n featurewise_std_normalization=False,\n rotation_range=10,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=.1,\n horizontal_flip=True)\n\n# model parameters/compilation\n\nmodel = mini_XCEPTION(input_shape, num_classes)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\n# begin training\nlog_file_path = base_path + 'emotion_training.log'\ncsv_logger = CSVLogger(log_file_path, append=False)\nearly_stop = EarlyStopping('val_loss', patience=patience)\nreduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,\n patience=int(patience/4), verbose=1)\ntrained_models_path = base_path + 'emotion_' + model_name\nmodel_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'\nmodel_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,\n save_best_only=True)\ncallbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]\n\n# loading dataset\nnum_samples, num_classes = emotions.shape\ntrain_data, val_data = split_data(faces, emotions, validation_split)\ntrain_faces, train_emotions = train_data\nmodel.fit_generator(data_generator.flow(train_faces, train_emotions,\n batch_size),\n steps_per_epoch=len(train_faces) / batch_size,\n epochs=num_epochs, verbose=1, callbacks=callbacks,\n validation_data=val_data)\n","sub_path":"keras/train_emotion_classifier.py","file_name":"train_emotion_classifier.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"652112024","text":"# Copyright 2021 The Brax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Probability distributions in JAX.\"\"\"\n\nimport abc\nimport jax\nimport jax.numpy as jnp\nimport tensorflow_probability as tfp\n\ntfp = tfp.substrates.jax\ntfd = tfp.distributions\n\n\nclass ParametricDistribution(abc.ABC):\n \"\"\"Abstract class for parametric (action) distribution.\"\"\"\n\n def __init__(self, param_size, postprocessor, event_ndims, reparametrizable):\n \"\"\"Abstract class for parametric (action) distribution.\n\n Specifies how to transform distribution parameters (i.e. actor output)\n into a distribution over actions.\n\n Args:\n param_size: size of the parameters for the distribution\n postprocessor: tfp.bijector which is applied after sampling (in practice,\n it's tanh or identity)\n event_ndims: rank of the distribution sample (i.e. action)\n reparametrizable: is the distribution reparametrizable\n \"\"\"\n self._param_size = param_size\n self._postprocessor = postprocessor\n self._event_ndims = event_ndims # rank of events\n self._reparametrizable = reparametrizable\n assert event_ndims in [0, 1]\n\n @abc.abstractmethod\n def create_dist(self, parameters):\n \"\"\"Creates tfp.distribution from parameters.\"\"\"\n pass\n\n @property\n def param_size(self):\n return self._param_size\n\n @property\n def reparametrizable(self):\n return self._reparametrizable\n\n def postprocess(self, event):\n return self._postprocessor.forward(event)\n\n def inverse_postprocess(self, event):\n return self._postprocessor.inverse(event)\n\n def sample_no_postprocessing(self, parameters, seed):\n return self.create_dist(parameters).sample(seed=seed)\n\n def sample(self, parameters, seed):\n \"\"\"Returns a sample from the postprocessed distribution.\"\"\"\n return self.postprocess(self.sample_no_postprocessing(parameters, seed))\n\n def log_prob(self, parameters, actions):\n \"\"\"Compute the log probability of actions.\"\"\"\n dist = self.create_dist(parameters)\n log_probs = dist.log_prob(actions)\n log_probs -= self._postprocessor.forward_log_det_jacobian(\n jnp.asarray(actions, jnp.float32), event_ndims=0)\n if self._event_ndims == 1:\n log_probs = jnp.sum(log_probs, axis=-1) # sum over action dimension\n return log_probs\n\n def entropy(self, parameters, seed):\n \"\"\"Return the entropy of the given distribution.\"\"\"\n dist = self.create_dist(parameters)\n entropy = dist.entropy()\n entropy += self._postprocessor.forward_log_det_jacobian(\n jnp.asarray(dist.sample(seed=seed), jnp.float32), event_ndims=0)\n if self._event_ndims == 1:\n entropy = jnp.sum(entropy, axis=-1)\n return entropy\n\n\nclass NormalTanhDistribution(ParametricDistribution):\n \"\"\"Normal distribution followed by tanh.\"\"\"\n\n def __init__(self, event_size, min_std=0.001):\n \"\"\"Initialize the distribution.\n\n Args:\n event_size: the size of events (i.e. 
actions).\n min_std: minimum std for the gaussian.\n \"\"\"\n # We apply tanh to gaussian actions to bound them.\n # Normally we would use tfd.TransformedDistribution to automatically\n # apply tanh to the distribution.\n # We can't do it here because of tanh saturation\n # which would make log_prob computations impossible. Instead, most\n # of the code operate on pre-tanh actions and we take the postprocessor\n # jacobian into account in log_prob computations.\n super().__init__(\n param_size=2 * event_size,\n postprocessor=tfp.bijectors.Tanh(),\n event_ndims=1,\n reparametrizable=True)\n self._min_std = min_std\n\n def create_dist(self, parameters):\n loc, scale = jnp.split(parameters, 2, axis=-1)\n scale = jax.nn.softplus(scale) + self._min_std\n dist = tfd.Normal(loc=loc, scale=scale)\n return dist\n\nclass NormalDistribution(ParametricDistribution):\n \"\"\"Normal distribution followed by tanh.\"\"\"\n\n def __init__(self, event_size):\n \"\"\"Initialize the distribution.\n\n Args:\n event_size: the size of events (i.e. actions).\n min_std: minimum std for the gaussian.\n \"\"\"\n super().__init__(\n param_size=2 * event_size,\n postprocessor=tfp.bijectors.Identity(),\n event_ndims=1,\n reparametrizable=True)\n\n def create_dist(self, parameters):\n loc, scale = jnp.split(parameters, 2, axis=-1)\n scale = jax.numpy.exp(scale)\n dist = tfd.Normal(loc=loc, scale=scale)\n return dist","sub_path":"brax/training/distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"562564121","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cochera', '0015_auto_20141006_2235'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='vehiculo',\n name='descripcion',\n field=models.CharField(default=None, max_length=200, null=True, verbose_name=b'Descripci\\xc3\\xb3n', blank=True),\n ),\n migrations.AlterField(\n model_name='vehiculo',\n name='dominio',\n field=models.CharField(default=None, max_length=20, null=True, blank=True),\n ),\n ]\n","sub_path":"cochera/migrations/0016_auto_20141006_2242.py","file_name":"0016_auto_20141006_2242.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"286375160","text":"\"\"\"\nAuthor: angles\n13/01/17 - 17:18\n\"\"\"\nimport os\nfrom datetime import datetime\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom architectures import generator\nfrom plotting_tools import save_img\nfrom utils import load_tf_model\n\nflags = tf.app.flags\nflags.DEFINE_integer(\"z_dim\", 32, \"dimension of the representation\")\nflags.DEFINE_integer(\"nb_samples\", 500, \"Training batch size\")\nflags.DEFINE_integer(\"output_size\", 64, \"Generated images' size, also cropping size\")\nflags.DEFINE_integer(\"nb_channels_output\", 3, \"3:RGB, 1:Gray-scale\")\nflags.DEFINE_string(\"dataset\", 'convex_hulls_70000', \"Folder that contains the model\")\nflags.DEFINE_string(\"name_experiment\", None, \"Folder that contains the model\")\nflags.DEFINE_integer(\"nb_channels_first_layer\", 16, \"Nb. channels that parammetrizes the networks' size\")\nFLAGS = flags.FLAGS\n\n# np.random.seed(1)\n\nif FLAGS.name_experiment is not None:\n dir_experiment = os.path.join('experiments', FLAGS.name_experiment)\nelse:\n name_experiment = FLAGS.dataset + '_z{0}_fl{1}_up_nor'.format(FLAGS.z_dim, FLAGS.nb_channels_first_layer)\n dir_experiment = os.path.join('experiments', name_experiment)\n\nprint(\"Dir. Experiment: {0}\".format(dir_experiment))\n\n# Build computational graph\n###########################\nshape_z = (FLAGS.nb_samples, FLAGS.z_dim)\nz_tensor = tf.placeholder(tf.float32, shape_z, name='z')\noutput_shape = (FLAGS.output_size, FLAGS.output_size, FLAGS.nb_channels_output)\nwith tf.variable_scope('generation') as scope:\n outputs_ = generator(z_tensor, output_shape, FLAGS.nb_channels_first_layer, is_training=False)\nsaver = tf.train.Saver()\nnb_layers = len(outputs_)\n\n# Generate the path in the latent space\n#######################################\n# z0 = np.random.uniform(-1, 1, size=(1, FLAGS.z_dim))\n# z1 = np.random.uniform(-1, 1, size=(1, FLAGS.z_dim))\nz0 = np.random.normal(size=(1, FLAGS.z_dim))\nz1 = np.random.normal(size=(1, FLAGS.z_dim))\nz = np.copy(z0)\n\ninterval = np.linspace(0, 1, FLAGS.nb_samples)\nfor t in interval:\n if t > 0:\n zt = (1 - t) * z0 + t * z1\n z = np.vstack((z, zt))\n\n# Create a session for running operations in the graph\n######################################################\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer()) # Initialize all the variables\n load_success, ckpt_name = load_tf_model(dir_experiment, sess, saver)\n if load_success:\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] Load failed...\")\n outputs = sess.run(outputs_, feed_dict={z_tensor: z})\n\nsamples = outputs[nb_layers - 1]\nsamples = np.add(samples, 1) / 2\n\n# plt.hist(samples.ravel(), bins=1000)\n# plt.show()\n\nfolder = 'path_from_' + ckpt_name + datetime.now().strftime(\"_%d%m%Y_%H%M%S\")\nfolder = os.path.join(dir_experiment, folder)\n\nif not os.path.exists(folder):\n os.makedirs(folder)\n\nfor idx_sample in tqdm(range(FLAGS.nb_samples)):\n filename = os.path.join(folder, 'sample_{0}.jpg'.format(idx_sample))\n save_img(filename, samples[idx_sample, :, :, :])\n # plot_images([samples[idx_sample, :, :, 0]], gray=True, save=True, file_name=filename)\n","sub_path":"path_two_images.py","file_name":"path_two_images.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}